Columns (string lengths, min–max):
  hip_filename    5 – 84
  hip_content     79 – 9.69M
  cuda_filename   4 – 83
  cuda_content    19 – 9.69M
4dde0ef29829b73052f4f105b4c11532ae9559cb.hip
// !!! This is a file automatically generated by hipify!!!
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/times.h>
#include <time.h>
#include <math.h>
#include <hip/hip_runtime.h>

#define PI 3.14159265358979323846
#define FactorArcosegRad 0.00000484814
#define BLOQUESIZE 4

clock_t timestart, timeend;

/**
@brief Función que transforma un valor en arco segundo a radianes
@param deltax: Valor numérico a transformar
@returns Valor correspondiente a la entrada en radianes
*/
float arcoseg_radian(float deltax){
    return FactorArcosegRad*deltax;
}

/**
@brief Función que lee el archivo de entrada
@param archivo: puntero al archivo a leer
@param tamano: Numero de visibilidades del archivo a leer
@returns
*/
double* readFile(FILE* archivo, int tamano){
    double* elementos =(double*) malloc(sizeof(double)*4*tamano);
    fread(elementos, tamano*4, sizeof(double), archivo);
    return elementos;
}

/**
@brief Función ejecuta el proceso de gridding
@param U: Valores de la coordenada U en el plano de Fourier
@param V: Valores de la coordenada V en el plano de Fourier
@param R: Valores reales de la visibilidad en el plano de Fourier
@param I: Valores imaginarios la visibilidad en el plano de Fourier
@param num_datos: Cantidad de visibilidades ingresadas o dimensión de los vectores anteriores
@param tamano: Lado de la matriz a construir, si tamano es 512 se construye una matriz de 512X512
@param V: Valores de la coordenada V en el plano de Fourier
@param deltaU: Valor delta necesario para determinar la vecindad de cada pixel de la grilla regular
@param r: vector de valores reales de la salida del proceso de gridding
@param k: vector de valores imaginarios de la salida del proceso de gridding
@returns
*/
__global__ void gridding_process(float *U, float *V, float *R, float *I, int num_datos, int tamano, float deltaU, float *r, float *k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i<num_datos)
    {
        float x, y, modx, mody;
        x = U[i]/deltaU+tamano/2;
        y = V[i]/deltaU+tamano/2;
        modx = U[i] - x*deltaU;
        mody = V[i] - y*deltaU;
        if(modx>deltaU/2){
            x+=1;
        }
        if (mody>deltaU/2) {
            y+=1;
        }
        if ((int)x<tamano && (int)y<tamano) {
            atomicAdd(&r[(int)y*tamano+(int)x], R[i]);
            atomicAdd(&k[(int)y*tamano+(int)x], I[i]);
        }
    }
}

__host__ unsigned long upper_power_of_two(unsigned long v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}

int main(int argc, char * const argv[])
{
    int tamano;   //tamaño de imagen
    int numdatos; //número de pasos
    float deltaX_arcoseg, deltaX_radian;
    float deltaU;
    char* archivo_entrada=NULL;
    char* archivo_salida=NULL;
    char* archivo_salida_i;
    int i, c;

    opterr = 0;
    while ((c = getopt (argc, argv, "i:z:d:N:o:")) != -1)
        switch (c)
        {
        case 'i':
            archivo_entrada = optarg;
            break;
        case 'z':
            numdatos = atoi(optarg);
            break;
        case 'd':
            deltaX_arcoseg = atof(optarg);
            break;
        case 'N':
            tamano = atoi(optarg);
            break;
        case 'o':
            archivo_salida = optarg;
            break;
        case '?':
            if (optopt == 'i' ||optopt == 'z' ||optopt == 'd'||optopt == 'N' ||optopt == 'o')
                fprintf (stderr, "Opcion -%c requiere un argumento.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Opcion desconocida `-%c'.\n", optopt);
            else
                fprintf (stderr, "Carater opcion desconocido `\\x%x'.\n", optopt);
            return 1;
        default:
            abort ();
        }

    /**
    Comprobación de Inputs
    - Valores mayores que cero
    - Cadenas no nulas
    **/
    if(tamano<=0){
        printf("El parametro -N debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(numdatos==0){
        printf("El parametro -z debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(deltaX_arcoseg==0){
        printf("El parametro -d debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(archivo_entrada==NULL){
        printf("Debe especificarse un archivo de entrada\n");
    }
    if(archivo_salida==NULL){
        printf("Debe especificarse un archivo de salida\n");
    }

    //Transformacion de unidades necesaria para calcular delta U
    deltaX_radian = arcoseg_radian(deltaX_arcoseg);
    //Determina delta U/V a utilizar
    deltaU = 1/(tamano*deltaX_radian);

    //Medición de tiempo de computo
    timestart = clock();

    //Lectura de entrada
    FILE *entrada = fopen(archivo_entrada,"r");
    double* data = readFile(entrada,numdatos);
    fclose(entrada);

    //Creando arrays para coordenada X, Y, R e I
    float *X = (float*)malloc(sizeof(float)*numdatos);
    float *Y = (float*)malloc(sizeof(float)*numdatos);
    float *R = (float*)malloc(sizeof(float)*numdatos);
    float *I = (float*)malloc(sizeof(float)*numdatos);
    //Quizas necesite dos vectores adicionales para el gridding [matrices desenroyadas]
    float *r = (float*)malloc(sizeof(float)*tamano*tamano);
    float *k = (float*)malloc(sizeof(float)*tamano*tamano);

    //Se asigan los valores correspondientes de la lectura
    for (i = 0; i < numdatos; i++) {
        X[i] = (float)data[i];
        Y[i] = (float)data[i+numdatos];
        R[i] = (float)data[i+2*numdatos];
        I[i] = (float)data[i+3*numdatos];
    }
    for (i = 0; i < tamano*tamano; ++i) {
        r[i] = 0;
        k[i] = 0;
    }

    //se declaran las variables CUDA
    float *C_X;
    float *C_Y;
    float *C_R;
    float *C_I;
    float *C_r;
    float *C_k;

    //Se reserva memoria CUDA
    hipMalloc( (void**)&C_X, numdatos*sizeof(float));
    hipMalloc( (void**)&C_Y, numdatos*sizeof(float));
    hipMalloc( (void**)&C_R, numdatos*sizeof(float));
    hipMalloc( (void**)&C_I, numdatos*sizeof(float));
    hipMalloc( (void**)&C_r, tamano*tamano*sizeof(float));
    hipMalloc( (void**)&C_k, tamano*tamano*sizeof(float));

    //se copia la matriz iniciada en las matrices de trabajo en memoria global GPU
    hipMemcpy( C_X, X, numdatos*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy( C_Y, Y, numdatos*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy( C_R, R, numdatos*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy( C_I, I, numdatos*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy( C_r, r, tamano*tamano*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy( C_k, k, tamano*tamano*sizeof(float), hipMemcpyHostToDevice);

    //determino dimension para el kernel
    long data_size_2 = upper_power_of_two(numdatos);

    //Se declaran las dimenciones
    dim3 dimBlock(BLOQUESIZE, 1);
    dim3 dimGrid(data_size_2/BLOQUESIZE, 1);

    //se ejecuta el kernel en la GPU
    //printf("%d - %d - %d\n", numdatos, kernel_size, kernel_size/BLOQUESIZE);
    hipLaunchKernelGGL(( gridding_process), dim3(dimGrid), dim3(dimBlock), 0, 0, C_X, C_Y, C_R, C_I, numdatos, tamano, deltaU, C_r, C_k);

    //se espera a que terminen
    hipDeviceSynchronize();

    //se obtiene la memoria de regreso
    hipMemcpy( r, C_r, tamano*tamano*sizeof(float), hipMemcpyDeviceToHost);
    hipMemcpy( k, C_k, tamano*tamano*sizeof(float), hipMemcpyDeviceToHost);

    //se libera la memoria global CUDA para que pueda ser usada por otro proceso
    hipFree( C_X );
    hipFree( C_Y );
    hipFree( C_R );
    hipFree( C_I );
    hipFree( C_r );
    hipFree( C_k );

    //Se imprime salida
    archivo_salida_i = (char*)malloc(sizeof(archivo_salida)*2);
    strcpy(archivo_salida_i, archivo_salida);
    FILE *f = fopen(strcat(archivo_salida, "real.raw"),"wb");
    FILE *g = fopen(strcat(archivo_salida_i, "img.raw"),"wb");
    fwrite(r, tamano*tamano, sizeof(float),f);
    fwrite(k, tamano*tamano, sizeof(float),g);
    fclose(f);
    fclose(g);

    //Se mide el tiempo utilizado
    timeend = clock();
    printf("Total = %f\n", (double) (timeend-timestart)/(double)CLOCKS_PER_SEC);

    return EXIT_SUCCESS;
}
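The kernel above maps each visibility at continuous coordinates (u, v) to a grid cell via x = u/deltaU + tamano/2, with deltaU = 1/(tamano*deltaX_radian). The short host-side sketch below reproduces that arithmetic for a single visibility so the indexing can be checked without a GPU; the sample values (N = 512, deltaX = 0.5 arcsec, one made-up (u, v)) are illustrative assumptions, not taken from the dataset.

// Minimal host-side sketch (assumed sample values) of the cell mapping used by gridding_process.
#include <stdio.h>

#define FactorArcosegRad 0.00000484814

int main(void)
{
    const int   N      = 512;    // assumed grid side (-N)
    const float deltaX = 0.5f;   // assumed pixel size in arcsec (-d)
    const float u = 1000.0f, v = -2500.0f;   // one illustrative visibility

    float deltaX_radian = FactorArcosegRad * deltaX;   // arcsec -> radians
    float deltaU = 1.0f / (N * deltaX_radian);         // same formula as main()

    // Same index computation as the kernel: offset to the grid centre, then truncate.
    float x = u / deltaU + N / 2;
    float y = v / deltaU + N / 2;

    printf("deltaU = %f, cell = (%d, %d)\n", deltaU, (int)x, (int)y);
    return 0;
}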
4dde0ef29829b73052f4f105b4c11532ae9559cb.cu
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/times.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>

#define PI 3.14159265358979323846
#define FactorArcosegRad 0.00000484814
#define BLOQUESIZE 4

clock_t timestart, timeend;

/**
@brief Función que transforma un valor en arco segundo a radianes
@param deltax: Valor numérico a transformar
@returns Valor correspondiente a la entrada en radianes
*/
float arcoseg_radian(float deltax){
    return FactorArcosegRad*deltax;
}

/**
@brief Función que lee el archivo de entrada
@param archivo: puntero al archivo a leer
@param tamano: Numero de visibilidades del archivo a leer
@returns
*/
double* readFile(FILE* archivo, int tamano){
    double* elementos =(double*) malloc(sizeof(double)*4*tamano);
    fread(elementos, tamano*4, sizeof(double), archivo);
    return elementos;
}

/**
@brief Función ejecuta el proceso de gridding
@param U: Valores de la coordenada U en el plano de Fourier
@param V: Valores de la coordenada V en el plano de Fourier
@param R: Valores reales de la visibilidad en el plano de Fourier
@param I: Valores imaginarios la visibilidad en el plano de Fourier
@param num_datos: Cantidad de visibilidades ingresadas o dimensión de los vectores anteriores
@param tamano: Lado de la matriz a construir, si tamano es 512 se construye una matriz de 512X512
@param V: Valores de la coordenada V en el plano de Fourier
@param deltaU: Valor delta necesario para determinar la vecindad de cada pixel de la grilla regular
@param r: vector de valores reales de la salida del proceso de gridding
@param k: vector de valores imaginarios de la salida del proceso de gridding
@returns
*/
__global__ void gridding_process(float *U, float *V, float *R, float *I, int num_datos, int tamano, float deltaU, float *r, float *k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i<num_datos)
    {
        float x, y, modx, mody;
        x = U[i]/deltaU+tamano/2;
        y = V[i]/deltaU+tamano/2;
        modx = U[i] - x*deltaU;
        mody = V[i] - y*deltaU;
        if(modx>deltaU/2){
            x+=1;
        }
        if (mody>deltaU/2) {
            y+=1;
        }
        if ((int)x<tamano && (int)y<tamano) {
            atomicAdd(&r[(int)y*tamano+(int)x], R[i]);
            atomicAdd(&k[(int)y*tamano+(int)x], I[i]);
        }
    }
}

__host__ unsigned long upper_power_of_two(unsigned long v)
{
    v--;
    v |= v >> 1;
    v |= v >> 2;
    v |= v >> 4;
    v |= v >> 8;
    v |= v >> 16;
    v++;
    return v;
}

int main(int argc, char * const argv[])
{
    int tamano;   //tamaño de imagen
    int numdatos; //número de pasos
    float deltaX_arcoseg, deltaX_radian;
    float deltaU;
    char* archivo_entrada=NULL;
    char* archivo_salida=NULL;
    char* archivo_salida_i;
    int i, c;

    opterr = 0;
    while ((c = getopt (argc, argv, "i:z:d:N:o:")) != -1)
        switch (c)
        {
        case 'i':
            archivo_entrada = optarg;
            break;
        case 'z':
            numdatos = atoi(optarg);
            break;
        case 'd':
            deltaX_arcoseg = atof(optarg);
            break;
        case 'N':
            tamano = atoi(optarg);
            break;
        case 'o':
            archivo_salida = optarg;
            break;
        case '?':
            if (optopt == 'i' ||optopt == 'z' ||optopt == 'd'||optopt == 'N' ||optopt == 'o')
                fprintf (stderr, "Opcion -%c requiere un argumento.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Opcion desconocida `-%c'.\n", optopt);
            else
                fprintf (stderr, "Carater opcion desconocido `\\x%x'.\n", optopt);
            return 1;
        default:
            abort ();
        }

    /**
    Comprobación de Inputs
    - Valores mayores que cero
    - Cadenas no nulas
    **/
    if(tamano<=0){
        printf("El parametro -N debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(numdatos==0){
        printf("El parametro -z debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(deltaX_arcoseg==0){
        printf("El parametro -d debe existir y ser mayor que 0\n");
        exit(1);
    }
    if(archivo_entrada==NULL){
        printf("Debe especificarse un archivo de entrada\n");
    }
    if(archivo_salida==NULL){
        printf("Debe especificarse un archivo de salida\n");
    }

    //Transformacion de unidades necesaria para calcular delta U
    deltaX_radian = arcoseg_radian(deltaX_arcoseg);
    //Determina delta U/V a utilizar
    deltaU = 1/(tamano*deltaX_radian);

    //Medición de tiempo de computo
    timestart = clock();

    //Lectura de entrada
    FILE *entrada = fopen(archivo_entrada,"r");
    double* data = readFile(entrada,numdatos);
    fclose(entrada);

    //Creando arrays para coordenada X, Y, R e I
    float *X = (float*)malloc(sizeof(float)*numdatos);
    float *Y = (float*)malloc(sizeof(float)*numdatos);
    float *R = (float*)malloc(sizeof(float)*numdatos);
    float *I = (float*)malloc(sizeof(float)*numdatos);
    //Quizas necesite dos vectores adicionales para el gridding [matrices desenroyadas]
    float *r = (float*)malloc(sizeof(float)*tamano*tamano);
    float *k = (float*)malloc(sizeof(float)*tamano*tamano);

    //Se asigan los valores correspondientes de la lectura
    for (i = 0; i < numdatos; i++) {
        X[i] = (float)data[i];
        Y[i] = (float)data[i+numdatos];
        R[i] = (float)data[i+2*numdatos];
        I[i] = (float)data[i+3*numdatos];
    }
    for (i = 0; i < tamano*tamano; ++i) {
        r[i] = 0;
        k[i] = 0;
    }

    //se declaran las variables CUDA
    float *C_X;
    float *C_Y;
    float *C_R;
    float *C_I;
    float *C_r;
    float *C_k;

    //Se reserva memoria CUDA
    cudaMalloc( (void**)&C_X, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_Y, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_R, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_I, numdatos*sizeof(float));
    cudaMalloc( (void**)&C_r, tamano*tamano*sizeof(float));
    cudaMalloc( (void**)&C_k, tamano*tamano*sizeof(float));

    //se copia la matriz iniciada en las matrices de trabajo en memoria global GPU
    cudaMemcpy( C_X, X, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_Y, Y, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_R, R, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_I, I, numdatos*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_r, r, tamano*tamano*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy( C_k, k, tamano*tamano*sizeof(float), cudaMemcpyHostToDevice);

    //determino dimension para el kernel
    long data_size_2 = upper_power_of_two(numdatos);

    //Se declaran las dimenciones
    dim3 dimBlock(BLOQUESIZE, 1);
    dim3 dimGrid(data_size_2/BLOQUESIZE, 1);

    //se ejecuta el kernel en la GPU
    //printf("%d - %d - %d\n", numdatos, kernel_size, kernel_size/BLOQUESIZE);
    gridding_process<<<dimGrid, dimBlock>>>(C_X, C_Y, C_R, C_I, numdatos, tamano, deltaU, C_r, C_k);

    //se espera a que terminen
    cudaDeviceSynchronize();

    //se obtiene la memoria de regreso
    cudaMemcpy( r, C_r, tamano*tamano*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy( k, C_k, tamano*tamano*sizeof(float), cudaMemcpyDeviceToHost);

    //se libera la memoria global CUDA para que pueda ser usada por otro proceso
    cudaFree( C_X );
    cudaFree( C_Y );
    cudaFree( C_R );
    cudaFree( C_I );
    cudaFree( C_r );
    cudaFree( C_k );

    //Se imprime salida
    archivo_salida_i = (char*)malloc(sizeof(archivo_salida)*2);
    strcpy(archivo_salida_i, archivo_salida);
    FILE *f = fopen(strcat(archivo_salida, "real.raw"),"wb");
    FILE *g = fopen(strcat(archivo_salida_i, "img.raw"),"wb");
    fwrite(r, tamano*tamano, sizeof(float),f);
    fwrite(k, tamano*tamano, sizeof(float),g);
    fclose(f);
    fclose(g);

    //Se mide el tiempo utilizado
    timeend = clock();
    printf("Total = %f\n", (double) (timeend-timestart)/(double)CLOCKS_PER_SEC);

    return EXIT_SUCCESS;
}
8c94a5a30370517f70a19baf03e954d82bf9b284.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__device__ void mul(double a, double b, double *res)
{
    *res = a * b;
    // NaN
    *res = (*res)-(*res) / (*res)-(*res);
}

__global__ void dot_prod(double *x, double *y, int size)
{
    double d;
    for (int i=0; i < size; ++i) {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
8c94a5a30370517f70a19baf03e954d82bf9b284.cu
#include <stdio.h>

__device__ void mul(double a, double b, double *res)
{
    *res = a * b;
    // NaN
    *res = (*res)-(*res) / (*res)-(*res);
}

__global__ void dot_prod(double *x, double *y, int size)
{
    double d;
    for (int i=0; i < size; ++i) {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
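In the pair above, `d` is read before it is initialized, every thread walks the full arrays, and `mul` overwrites its result with an expression that evaluates to NaN (as its own comment notes), so the file reads like a deliberate test case rather than a working reduction. A minimal corrected sketch, under the assumption that a single-thread serial sum is all that is wanted, could look like this:

// Hedged sketch: a fixed serial dot product, not the dataset's original code.
#include <stdio.h>
#include <cuda_runtime.h>

__device__ void mul_fixed(double a, double b, double *res)
{
    *res = a * b;   // keep only the product; drop the NaN-producing line
}

__global__ void dot_prod_fixed(const double *x, const double *y, int size)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {              // one thread does the serial sum, as the original print implies
        double d = 0.0;          // initialize the accumulator
        for (int i = 0; i < size; ++i) {
            double tmp;
            mul_fixed(x[i], y[i], &tmp);
            d += tmp;
        }
        printf("dot: %f\n", d);
    }
}

int main(void)
{
    const int n = 4;
    double hx[n] = {1, 2, 3, 4}, hy[n] = {4, 3, 2, 1};
    double *dx, *dy;
    cudaMalloc((void**)&dx, n * sizeof(double));
    cudaMalloc((void**)&dy, n * sizeof(double));
    cudaMemcpy(dx, hx, n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, hy, n * sizeof(double), cudaMemcpyHostToDevice);
    dot_prod_fixed<<<1, 32>>>(dx, dy, n);
    cudaDeviceSynchronize();   // flush the device-side printf
    cudaFree(dx);
    cudaFree(dy);
    return 0;
}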
d4d75da6774cffba6ee472656e43389e87298d88.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // ------------------------------------------------------------------ // Flow-Guided Feature Aggregation // Copyright (c) 2017 Microsoft // Licensed under The MIT License // Written by Yuwen Xiong // ------------------------------------------------------------------ // Based on: // Faster R-CNN // Copyright (c) 2015 Microsoft // Licensed under The MIT License // https://github.com/shaoqingren/faster_rcnn // ------------------------------------------------------------------ //#include "gpu_nms.hpp" #include <vector> #include <iostream> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) int const threadsPerBlock = sizeof(unsigned long long) * 8; __device__ inline float devIoU(float const * const a, float const * const b) { float left = max(a[0], b[0]), right = min(a[2], b[2]); float top = max(a[1], b[1]), bottom = min(a[3], b[3]); float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f); float interS = width * height; float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1); float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1); return interS / (Sa + Sb - interS); } __global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh, const float *dev_boxes, unsigned long long *dev_mask) { const int row_start = blockIdx.y; const int col_start = blockIdx.x; // if (row_start > col_start) return; const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); __shared__ float block_boxes[threadsPerBlock * 5]; if (threadIdx.x < col_size) { block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0]; block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1]; block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2]; block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3]; block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4]; } __syncthreads(); if (threadIdx.x < row_size) { const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; const float *cur_box = dev_boxes + cur_box_idx * 5; int i = 0; unsigned long long t = 0; int start = 0; if (row_start == col_start) { start = threadIdx.x + 1; } for (i = start; i < col_size; i++) { if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) { t |= 1ULL << i; } } const int col_blocks = DIVUP(n_boxes, threadsPerBlock); dev_mask[cur_box_idx * col_blocks + col_start] = t; } } void _set_device(int device_id) { int current_device; CUDA_CHECK(hipGetDevice(&current_device)); if (current_device == device_id) { return; } // The call to hipSetDevice must come before any calls to Get, which // may perform initialization using the GPU. 
CUDA_CHECK(hipSetDevice(device_id)); } void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num, int boxes_dim, float nms_overlap_thresh, int device_id) { _set_device(device_id); float* boxes_dev = NULL; unsigned long long* mask_dev = NULL; const int col_blocks = DIVUP(boxes_num, threadsPerBlock); CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float))); CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long))); dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock)); dim3 threads(threadsPerBlock); hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev); std::vector<unsigned long long> mask_host(boxes_num * col_blocks); CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost)); std::vector<unsigned long long> remv(col_blocks); memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); int num_to_keep = 0; for (int i = 0; i < boxes_num; i++) { int nblock = i / threadsPerBlock; int inblock = i % threadsPerBlock; if (!(remv[nblock] & (1ULL << inblock))) { keep_out[num_to_keep++] = i; unsigned long long *p = &mask_host[0] + i * col_blocks; for (int j = nblock; j < col_blocks; j++) { remv[j] |= p[j]; } } } *num_out = num_to_keep; CUDA_CHECK(hipFree(boxes_dev)); CUDA_CHECK(hipFree(mask_dev)); } /* Generated by Cython 0.24 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000) #error Cython requires Python 2.6+ or Python 3.2+. 
#else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if 
PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef __cplusplus #error "Cython files generated with the C++ option must be compiled with a C++ compiler." #endif #ifndef CYTHON_INLINE #define CYTHON_INLINE inline #endif template<typename T> void __Pyx_call_destructor(T& x) { x.~T(); } template<typename T> class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } T *operator->() { return ptr; } operator T&() { return *ptr; } private: T *ptr; }; #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__nms__gpu_nms #define __PYX_HAVE_API__nms__gpu_nms #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "gpu_nms.hpp" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct 
{PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "nms\\gpu_nms.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, 
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, 
value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) 
((a)==(b))
#define __Pyx_c_sumf(a, b) ((a)+(b))
#define __Pyx_c_difff(a, b) ((a)-(b))
#define __Pyx_c_prodf(a, b) ((a)*(b))
#define __Pyx_c_quotf(a, b) ((a)/(b))
#define __Pyx_c_negf(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zerof(z) ((z)==(float)0)
#define __Pyx_c_conjf(z) (::std::conj(z))
#if 1
#define __Pyx_c_absf(z) (::std::abs(z))
#define __Pyx_c_powf(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zerof(z) ((z)==0)
#define __Pyx_c_conjf(z) (conjf(z))
#if 1
#define __Pyx_c_absf(z) (cabsf(z))
#define __Pyx_c_powf(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* None.proto */
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/* None.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq(a, b) ((a)==(b))
#define __Pyx_c_sum(a, b) ((a)+(b))
#define __Pyx_c_diff(a, b) ((a)-(b))
#define __Pyx_c_prod(a, b) ((a)*(b))
#define __Pyx_c_quot(a, b) ((a)/(b))
#define __Pyx_c_neg(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero(z) ((z)==(double)0)
#define __Pyx_c_conj(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs(z) (::std::abs(z))
#define __Pyx_c_pow(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero(z) ((z)==0)
#define __Pyx_c_conj(z) (conj(z))
#if 1
#define __Pyx_c_abs(z) (cabs(z))
#define __Pyx_c_pow(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value);
/* CIntFromPy.proto */
static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static
CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nms.gpu_nms' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 }; #define __Pyx_MODULE_NAME "nms.gpu_nms" int __pyx_module_is_main_nms__gpu_nms = 0; /* Implementation of 'nms.gpu_nms' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_np[] = "np"; static const char __pyx_k_dets[] = "dets"; static const char __pyx_k_keep[] = "keep"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_scores[] = "scores"; static const char __pyx_k_thresh[] = "thresh"; static const char __pyx_k_argsort[] = "argsort"; static const char __pyx_k_gpu_nms[] = "gpu_nms"; static const char __pyx_k_num_out[] = "num_out"; static const char __pyx_k_boxes_dim[] = "boxes_dim"; static const char __pyx_k_boxes_num[] = "boxes_num"; static const char __pyx_k_device_id[] = "device_id"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms"; static const char __pyx_k_sorted_dets[] = "sorted_dets"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_argsort; static PyObject *__pyx_n_s_boxes_dim; static PyObject *__pyx_n_s_boxes_num; static PyObject *__pyx_n_s_dets; static PyObject *__pyx_n_s_device_id; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_gpu_nms; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_int32; static PyObject *__pyx_n_s_keep; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nms_gpu_nms; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_num_out; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_scores; static PyObject *__pyx_n_s_sorted_dets; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thresh; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject 
*__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_int_4; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_slice__3; static PyObject *__pyx_slice__4; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_codeobj__12; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_dets = 0; PyObject *__pyx_v_thresh = 0; __pyx_t_5numpy_int32_t __pyx_v_device_id; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_dets = ((PyArrayObject *)values[0]); __pyx_v_thresh = ((PyObject*)values[1]); if (values[2]) { __pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) } else { __pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0); } } goto __pyx_L4_argument_unpacking_done; 
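/* The wrapper above unpacks the Python-level call gpu_nms(dets, thresh, device_id=0)
 * from positional and keyword arguments; the labels that follow handle a malformed
 * argument tuple and type-check failures. Illustrative Python-side usage (the 0.7
 * threshold is hypothetical): keep = gpu_nms(dets, 0.7, device_id=0), where dets is a
 * 2-D float32 array whose fifth column (dets[:, 4]) holds the scores used further below. */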
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error) __pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) { int __pyx_v_boxes_num; int __pyx_v_boxes_dim; int __pyx_v_num_out; PyArrayObject *__pyx_v_keep = 0; PyArrayObject *__pyx_v_scores = 0; PyArrayObject *__pyx_v_order = 0; PyArrayObject *__pyx_v_sorted_dets = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_dets; __Pyx_Buffer __pyx_pybuffer_dets; __Pyx_LocalBuf_ND __pyx_pybuffernd_keep; __Pyx_Buffer __pyx_pybuffer_keep; __Pyx_LocalBuf_ND __pyx_pybuffernd_order; __Pyx_Buffer __pyx_pybuffer_order; __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; __Pyx_Buffer __pyx_pybuffer_scores; __Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets; __Pyx_Buffer __pyx_pybuffer_sorted_dets; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; PyArrayObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; float __pyx_t_14; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; __Pyx_RefNannySetupContext("gpu_nms", 0); __pyx_pybuffer_keep.pybuffer.buf = NULL; __pyx_pybuffer_keep.refcount = 0; __pyx_pybuffernd_keep.data = NULL; __pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep; __pyx_pybuffer_scores.pybuffer.buf = NULL; __pyx_pybuffer_scores.refcount = 0; __pyx_pybuffernd_scores.data = NULL; __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; __pyx_pybuffer_order.pybuffer.buf = NULL; __pyx_pybuffer_order.refcount = 0; __pyx_pybuffernd_order.data = NULL; __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; __pyx_pybuffer_sorted_dets.pybuffer.buf = NULL; __pyx_pybuffer_sorted_dets.refcount = 0; __pyx_pybuffernd_sorted_dets.data = NULL; __pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets; __pyx_pybuffer_dets.pybuffer.buf = NULL; __pyx_pybuffer_dets.refcount = 0; __pyx_pybuffernd_dets.data = NULL; __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) } __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; 
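/* Buffer setup for the 2-D float32 `dets` argument: __Pyx_GetBufferAndValidate checks the
 * dtype and dimensionality declared in the .pyx signature, and the stride/shape of each
 * dimension is cached in diminfo (dimension 0 above, dimension 1 just below) so the
 * generated code can index the array with direct pointer arithmetic. */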
__pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; /* "nms/gpu_nms.pyx":18 * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<< * cdef int boxes_dim = dets.shape[1] * cdef int num_out */ __pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]); /* "nms/gpu_nms.pyx":19 * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] * cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<< * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ */ __pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]); /* "nms/gpu_nms.pyx":22 * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ * keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 21, __pyx_L1_error) } else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; } } __pyx_t_6 = 0; __pyx_v_keep = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ 
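/* `scores = dets[:, 4]` compiles to a PyObject_GetItem call; __pyx_tuple__2 is presumably
 * the pre-built index tuple (slice(None, None, None), 4) assembled from the module
 * constants __pyx_slice_ and __pyx_int_4 declared earlier. The result is then validated
 * as a 1-D float32 buffer, mirroring the pattern used for `keep`. */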
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 23, __pyx_L1_error) } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; } } __pyx_t_7 = 0; __pyx_v_scores = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 27, __pyx_L1_error) } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_order = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_order)); 
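/* `sorted_dets = dets[order, :]`: a 2-tuple (order, slice(None)) is assembled here and
 * passed to PyObject_GetItem; the fancy-indexed copy is exposed as a 2-D float32 buffer
 * so that &sorted_dets[0, 0] can be handed to the C entry point
 * _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) declared in the .pyx. */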
__Pyx_GIVEREF(((PyObject *)__pyx_v_order)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order)); __Pyx_INCREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error) __pyx_t_9 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 29, __pyx_L1_error) } else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1]; } } __pyx_t_9 = 0; __pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":31 * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<< * keep = keep[:num_out] * return list(order[keep]) */ __pyx_t_10 = 0; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_11 = -1; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape; if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error) _nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id); /* "nms/gpu_nms.pyx":32 * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = 
keep[:num_out] # <<<<<<<<<<<<<< * return list(order[keep]) */ __pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_11 < 0)) { PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17); } } __pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error) } __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":33 * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] * return list(order[keep]) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); 
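/* Function exit: both the error path above and this success path release every acquired
 * buffer before the local ndarrays are DECREF'd. At the Python level the function returns
 * list(order[keep]), i.e. the indices of the kept boxes expressed in the original
 * (unsorted) detection order. */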
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_keep); __Pyx_XDECREF((PyObject *)__pyx_v_scores); __Pyx_XDECREF((PyObject *)__pyx_v_order); __Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # 
<<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder 
== '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: 
__pyx_v_f = ((char *)"l"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = 
PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # 
<<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, 
<void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject 
*__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = 
PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - 
offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
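/*
 * The sequence of PyObject_RichCompare checks around this point maps each
 * NumPy type number (NPY_BYTE, NPY_SHORT, NPY_INT, ...) to its single-character
 * buffer format code, written as an ASCII value (98 == 'b', 105 == 'i',
 * 102 == 'f', 100 == 'd', ...) to avoid the char-literal warnings referenced
 * in the quoted "ticket #99" comment; complex types emit 'Z' followed by the
 * base code.
 */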
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: 
f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "gpu_nms", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 
0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, {&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1}, {&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1}, {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, {&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, {&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef 
np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * 
* # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initgpu_nms(void); /*proto*/ PyMODINIT_FUNC initgpu_nms(void) #else PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/ PyMODINIT_FUNC PyInit_gpu_nms(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_nms__gpu_nms) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "nms.gpu_nms")) { if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "nms/gpu_nms.pyx":8 * # -------------------------------------------------------- * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
__pyx_t_1 = 0; /* "nms/gpu_nms.pyx":11 * cimport numpy as np * * assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<< * * cdef extern from "gpu_nms.hpp": */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 11, __pyx_L1_error) } } #endif /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":1 * # -------------------------------------------------------- # <<<<<<<<<<<<<< * # Faster R-CNN * # Copyright (c) 2015 Microsoft */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* 
__Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_COMPILING_IN_CPYTHON PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_COMPILING_IN_CPYTHON result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, 
*tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) 
goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* 
code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? 
c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == 
b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { 
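/* Componentwise complex subtraction for the struct-based fallback representation
   used when native C complex support (CYTHON_CCOMPLEX) is not available. */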
__pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) { const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(npy_int32) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x)) } 
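/* Here npy_int32 is at least as wide as long, so the PyInt value always fits;
   only a negative-value check is needed when the target type is treated as unsigned. */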
else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (npy_int32) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0]) case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) { return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) { return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) { return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (npy_int32) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(npy_int32) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0]) case -2: if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -3: if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) 
{ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -4: if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; } #endif if (sizeof(npy_int32) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x)) } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else npy_int32 val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (npy_int32) -1; } } else { npy_int32 val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (npy_int32) -1; val = __Pyx_PyInt_As_npy_int32(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to npy_int32"); return (npy_int32) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to npy_int32"); return (npy_int32) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 
(int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { 
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 
* PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: 
PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
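/*
 * Illustrative usage sketch (not part of the original module): a minimal host
 * program showing how the exported _nms entry point might be driven, assuming
 * gpu_nms.hpp declares it with the signature used below. The [x1, y1, x2, y2,
 * score] row layout, the descending-score ordering, and the 0.7 overlap
 * threshold are assumptions for the example only, not values from this file.
 */
#include <vector>
#include <cstdio>
#include "gpu_nms.hpp"  // assumed to declare _nms(...)

int main() {
  const int boxes_num = 3;
  const int boxes_dim = 5;  // x1, y1, x2, y2, score per box
  // Boxes are assumed to be pre-sorted by descending score before NMS.
  float boxes[boxes_num * boxes_dim] = {
       0.f,  0.f, 10.f, 10.f, 0.9f,
       1.f,  1.f, 11.f, 11.f, 0.8f,  // overlaps the first box heavily
      50.f, 50.f, 60.f, 60.f, 0.7f
  };
  std::vector<long> keep(boxes_num);
  int num_out = 0;
  _nms(keep.data(), &num_out, boxes, boxes_num, boxes_dim,
       0.7f /* overlap threshold */, 0 /* device_id */);
  for (int i = 0; i < num_out; ++i) {
    std::printf("kept box %ld\n", keep[i]);
  }
  return 0;
}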
d4d75da6774cffba6ee472656e43389e87298d88.cu
// ------------------------------------------------------------------
// Flow-Guided Feature Aggregation
// Copyright (c) 2017 Microsoft
// Licensed under The MIT License
// Written by Yuwen Xiong
// ------------------------------------------------------------------
// Based on:
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License
// https://github.com/shaoqingren/faster_rcnn
// ------------------------------------------------------------------
//#include "gpu_nms.hpp"
#include <vector>
#include <iostream>
#include <cstring>  // memset

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))

// One 64-bit mask word covers one block of boxes: threadsPerBlock is 64.
int const threadsPerBlock = sizeof(unsigned long long) * 8;

// Intersection-over-union of two boxes laid out as [x1, y1, x2, y2, ...].
__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

// Each block compares one 64-box "row" tile against one 64-box "column" tile.
// Bit i of the resulting 64-bit word marks column box i as overlapping the
// current row box above the threshold.
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size =
        min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size =
        min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] =
        dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    // col_blocks mask words per box, e.g. n_boxes = 150 -> DIVUP(150, 64) = 3.
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}

void _nms(long* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);

  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);

  CUDA_CHECK(cudaMalloc(&boxes_dev,
                        boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev,
                        boxes_host,
                        boxes_num * boxes_dim * sizeof(float),
                        cudaMemcpyHostToDevice));

  CUDA_CHECK(cudaMalloc(&mask_dev,
                        boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
              DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  nms_kernel<<<blocks, threads>>>(boxes_num,
                                  nms_overlap_thresh,
                                  boxes_dev,
                                  mask_dev);

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(cudaMemcpy(&mask_host[0],
                        mask_dev,
                        sizeof(unsigned long long) * boxes_num * col_blocks,
                        cudaMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  // Greedy host-side pass: keep box i unless a previously kept box has
  // already set its suppression bit in remv.
  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(mask_dev));
}

/* Generated by Cython 0.24 */

#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
    #error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03020000)
    #error Cython requires Python 2.6+ or Python 3.2+.
#else #define CYTHON_ABI "0_24" #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #endif #if !defined(CYTHON_USE_PYLONG_INTERNALS) && CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x02070000 #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? 
PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if 
PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #elif CYTHON_COMPILING_IN_CPYTHON && PY_MAJOR_VERSION >= 3 typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifndef __cplusplus #error "Cython files generated with the C++ option must be compiled with a C++ compiler." #endif #ifndef CYTHON_INLINE #define CYTHON_INLINE inline #endif template<typename T> void __Pyx_call_destructor(T& x) { x.~T(); } template<typename T> class __Pyx_FakeReference { public: __Pyx_FakeReference() : ptr(NULL) { } __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } T *operator->() { return ptr; } operator T&() { return *ptr; } private: T *ptr; }; #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__nms__gpu_nms #define __PYX_HAVE_API__nms__gpu_nms #include "string.h" #include "stdio.h" #include "stdlib.h" #include "numpy/arrayobject.h" #include "numpy/ufuncobject.h" #include "gpu_nms.hpp" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #ifdef PYREX_WITHOUT_ASSERTIONS #define CYTHON_WITHOUT_ASSERTIONS #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif typedef struct 
{PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) && defined (_M_X64) #define __Pyx_sst_abs(value) _abs64(value) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? -value : value) #endif static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyObject_AsSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) #if PY_MAJOR_VERSION < 3 static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #else #define __Pyx_Py_UNICODE_strlen Py_UNICODE_strlen #endif #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_COMPILING_IN_CPYTHON #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 
|| (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static PyObject *__pyx_m; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static const char *__pyx_filename; /* None.proto */ #if !defined(CYTHON_CCOMPLEX) #if defined(__cplusplus) #define CYTHON_CCOMPLEX 1 #elif defined(_Complex_I) #define CYTHON_CCOMPLEX 1 #else #define CYTHON_CCOMPLEX 0 #endif #endif #if CYTHON_CCOMPLEX #ifdef __cplusplus #include <complex> #else #include <complex.h> #endif #endif #if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__) #undef _Complex_I #define _Complex_I 1.0fj #endif static const char *__pyx_f[] = { "nms\\gpu_nms.pyx", "__init__.pxd", "type.pxd", }; /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":725 * # in Cython to enable them only on the right systems. 
* * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t */ typedef npy_int8 __pyx_t_5numpy_int8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":726 * * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t */ typedef npy_int16 __pyx_t_5numpy_int16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":727 * ctypedef npy_int8 int8_t * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< * ctypedef npy_int64 int64_t * #ctypedef npy_int96 int96_t */ typedef npy_int32 __pyx_t_5numpy_int32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":728 * ctypedef npy_int16 int16_t * ctypedef npy_int32 int32_t * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< * #ctypedef npy_int96 int96_t * #ctypedef npy_int128 int128_t */ typedef npy_int64 __pyx_t_5numpy_int64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":732 * #ctypedef npy_int128 int128_t * * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t */ typedef npy_uint8 __pyx_t_5numpy_uint8_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":733 * * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t */ typedef npy_uint16 __pyx_t_5numpy_uint16_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":734 * ctypedef npy_uint8 uint8_t * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< * ctypedef npy_uint64 uint64_t * #ctypedef npy_uint96 uint96_t */ typedef npy_uint32 __pyx_t_5numpy_uint32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":735 * ctypedef npy_uint16 uint16_t * ctypedef npy_uint32 uint32_t * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< * #ctypedef npy_uint96 uint96_t * #ctypedef npy_uint128 uint128_t */ typedef npy_uint64 __pyx_t_5numpy_uint64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":739 * #ctypedef npy_uint128 uint128_t * * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< * ctypedef npy_float64 float64_t * #ctypedef npy_float80 float80_t */ typedef npy_float32 __pyx_t_5numpy_float32_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":740 * * ctypedef npy_float32 float32_t * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< * #ctypedef npy_float80 float80_t * #ctypedef npy_float128 float128_t */ typedef npy_float64 __pyx_t_5numpy_float64_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":749 * # The int types are mapped a bit surprising -- * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t # <<<<<<<<<<<<<< * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t */ typedef npy_long __pyx_t_5numpy_int_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":750 * # numpy.int corresponds to 'l' and numpy.long to 'q' * ctypedef npy_long int_t * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< * ctypedef npy_longlong longlong_t * */ typedef npy_longlong __pyx_t_5numpy_long_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":751 * ctypedef npy_long int_t * ctypedef npy_longlong long_t * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< * * ctypedef npy_ulong uint_t */ typedef npy_longlong __pyx_t_5numpy_longlong_t; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":753 * ctypedef npy_longlong longlong_t * * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t */ typedef npy_ulong __pyx_t_5numpy_uint_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":754 * * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< * ctypedef npy_ulonglong ulonglong_t * */ typedef npy_ulonglong __pyx_t_5numpy_ulong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":755 * ctypedef npy_ulong uint_t * ctypedef npy_ulonglong ulong_t * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< * * ctypedef npy_intp intp_t */ typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":757 * ctypedef npy_ulonglong ulonglong_t * * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< * ctypedef npy_uintp uintp_t * */ typedef npy_intp __pyx_t_5numpy_intp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":758 * * ctypedef npy_intp intp_t * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< * * ctypedef npy_double float_t */ typedef npy_uintp __pyx_t_5numpy_uintp_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":760 * ctypedef npy_uintp uintp_t * * ctypedef npy_double float_t # <<<<<<<<<<<<<< * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t */ typedef npy_double __pyx_t_5numpy_float_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":761 * * ctypedef npy_double float_t * ctypedef npy_double double_t # <<<<<<<<<<<<<< * ctypedef npy_longdouble longdouble_t * */ typedef npy_double __pyx_t_5numpy_double_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":762 * ctypedef npy_double float_t * ctypedef npy_double double_t * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cfloat cfloat_t */ typedef npy_longdouble __pyx_t_5numpy_longdouble_t; /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< float > __pyx_t_float_complex; #else typedef float _Complex __pyx_t_float_complex; #endif #else typedef struct { float real, imag; } __pyx_t_float_complex; #endif /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus typedef ::std::complex< double > __pyx_t_double_complex; #else typedef double _Complex __pyx_t_double_complex; #endif #else typedef struct { double real, imag; } __pyx_t_double_complex; #endif /*--- Type declarations ---*/ /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":764 * ctypedef npy_longdouble longdouble_t * * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t */ typedef npy_cfloat __pyx_t_5numpy_cfloat_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":765 * * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< * ctypedef npy_clongdouble clongdouble_t * */ typedef npy_cdouble __pyx_t_5numpy_cdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":766 * ctypedef npy_cfloat cfloat_t * ctypedef npy_cdouble cdouble_t * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< * * ctypedef npy_cdouble complex_t */ typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":768 * ctypedef npy_clongdouble clongdouble_t * * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< * * 
cdef inline object PyArray_MultiIterNew1(a): */ typedef npy_cdouble __pyx_t_5numpy_complex_t; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact); /* BufferFormatCheck.proto */ static CYTHON_INLINE int __Pyx_GetBufferAndValidate(Py_buffer* buf, PyObject* obj, 
__Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack); static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info); static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); // PROTO /* PyObjectGetAttrStr.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* PyObjectCallNoArg.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); #else #define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) #endif /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); #define __Pyx_BufPtrStrided1d(type, buf, i0, s0) (type)((char*)buf + i0 * s0) #define __Pyx_BufPtrStrided2d(type, buf, i0, s0, i1, s1) (type)((char*)buf + i0 * s0 + i1 * s1) /* SliceObject.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice( PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** py_start, PyObject** py_stop, PyObject** py_slice, int has_cstart, int has_cstop, int wraparound); /* BufferFallbackError.proto */ static void __Pyx_RaiseBufferFallbackError(void); /* PyThreadStateGet.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = PyThreadState_GET(); #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #endif /* PyErrFetchRestore.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, 
value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* DictGetItem.proto */ #if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) { PyObject *value; value = PyDict_GetItemWithError(d, key); if (unlikely(!value)) { if (!PyErr_Occurred()) { PyObject* args = PyTuple_Pack(1, key); if (likely(args)) PyErr_SetObject(PyExc_KeyError, args); Py_XDECREF(args); } return NULL; } Py_INCREF(value); return value; } #else #define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key) #endif /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* None.proto */ static Py_ssize_t __Pyx_zeros[] = {0, 0, 0, 0, 0, 0, 0, 0}; static Py_ssize_t __Pyx_minusones[] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* None.proto */ #if CYTHON_CCOMPLEX #ifdef __cplusplus #define __Pyx_CREAL(z) ((z).real()) #define __Pyx_CIMAG(z) ((z).imag()) #else #define __Pyx_CREAL(z) (__real__(z)) #define __Pyx_CIMAG(z) (__imag__(z)) #endif #else #define __Pyx_CREAL(z) ((z).real) #define __Pyx_CIMAG(z) ((z).imag) #endif #if defined(__cplusplus) && CYTHON_CCOMPLEX && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) #define __Pyx_SET_CREAL(z,x) ((z).real(x)) #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) #else #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) #endif /* None.proto */ static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eqf(a, b) 
((a)==(b)) #define __Pyx_c_sumf(a, b) ((a)+(b)) #define __Pyx_c_difff(a, b) ((a)-(b)) #define __Pyx_c_prodf(a, b) ((a)*(b)) #define __Pyx_c_quotf(a, b) ((a)/(b)) #define __Pyx_c_negf(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zerof(z) ((z)==(float)0) #define __Pyx_c_conjf(z) (::std::conj(z)) #if 1 #define __Pyx_c_absf(z) (::std::abs(z)) #define __Pyx_c_powf(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zerof(z) ((z)==0) #define __Pyx_c_conjf(z) (conjf(z)) #if 1 #define __Pyx_c_absf(z) (cabsf(z)) #define __Pyx_c_powf(a, b) (cpowf(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex, __pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex); static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex); #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex); static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex, __pyx_t_float_complex); #endif #endif /* None.proto */ static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); /* None.proto */ #if CYTHON_CCOMPLEX #define __Pyx_c_eq(a, b) ((a)==(b)) #define __Pyx_c_sum(a, b) ((a)+(b)) #define __Pyx_c_diff(a, b) ((a)-(b)) #define __Pyx_c_prod(a, b) ((a)*(b)) #define __Pyx_c_quot(a, b) ((a)/(b)) #define __Pyx_c_neg(a) (-(a)) #ifdef __cplusplus #define __Pyx_c_is_zero(z) ((z)==(double)0) #define __Pyx_c_conj(z) (::std::conj(z)) #if 1 #define __Pyx_c_abs(z) (::std::abs(z)) #define __Pyx_c_pow(a, b) (::std::pow(a, b)) #endif #else #define __Pyx_c_is_zero(z) ((z)==0) #define __Pyx_c_conj(z) (conj(z)) #if 1 #define __Pyx_c_abs(z) (cabs(z)) #define __Pyx_c_pow(a, b) (cpow(a, b)) #endif #endif #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex, __pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex); static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex); #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex); static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex, __pyx_t_double_complex); #endif #endif /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value); /* CIntFromPy.proto */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static 
CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* PyIdentifierFromString.proto */ #if !defined(__Pyx_PyIdentifier_FromString) #if PY_MAJOR_VERSION < 3 #define __Pyx_PyIdentifier_FromString(s) PyString_FromString(s) #else #define __Pyx_PyIdentifier_FromString(s) PyUnicode_FromString(s) #endif #endif /* ModuleImport.proto */ static PyObject *__Pyx_ImportModule(const char *name); /* TypeImport.proto */ static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); /* Module declarations from 'cpython.buffer' */ /* Module declarations from 'libc.string' */ /* Module declarations from 'libc.stdio' */ /* Module declarations from '__builtin__' */ /* Module declarations from 'cpython.type' */ static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; /* Module declarations from 'cpython' */ /* Module declarations from 'cpython.object' */ /* Module declarations from 'cpython.ref' */ /* Module declarations from 'libc.stdlib' */ /* Module declarations from 'numpy' */ /* Module declarations from 'numpy' */ static PyTypeObject *__pyx_ptype_5numpy_dtype = 0; static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0; static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0; static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0; static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0; static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *, char *, char *, int *); /*proto*/ /* Module declarations from 'nms.gpu_nms' */ static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t = { "float32_t", NULL, sizeof(__pyx_t_5numpy_float32_t), { 0 }, 0, 'R', 0, 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t = { "int32_t", NULL, sizeof(__pyx_t_5numpy_int32_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_int32_t) ? 'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_int32_t), 0 }; static __Pyx_TypeInfo __Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t = { "intp_t", NULL, sizeof(__pyx_t_5numpy_intp_t), { 0 }, 0, IS_UNSIGNED(__pyx_t_5numpy_intp_t) ? 
'U' : 'I', IS_UNSIGNED(__pyx_t_5numpy_intp_t), 0 }; #define __Pyx_MODULE_NAME "nms.gpu_nms" int __pyx_module_is_main_nms__gpu_nms = 0; /* Implementation of 'nms.gpu_nms' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_RuntimeError; static const char __pyx_k_np[] = "np"; static const char __pyx_k_dets[] = "dets"; static const char __pyx_k_keep[] = "keep"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_dtype[] = "dtype"; static const char __pyx_k_int32[] = "int32"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_order[] = "order"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_scores[] = "scores"; static const char __pyx_k_thresh[] = "thresh"; static const char __pyx_k_argsort[] = "argsort"; static const char __pyx_k_gpu_nms[] = "gpu_nms"; static const char __pyx_k_num_out[] = "num_out"; static const char __pyx_k_boxes_dim[] = "boxes_dim"; static const char __pyx_k_boxes_num[] = "boxes_num"; static const char __pyx_k_device_id[] = "device_id"; static const char __pyx_k_ValueError[] = "ValueError"; static const char __pyx_k_nms_gpu_nms[] = "nms.gpu_nms"; static const char __pyx_k_sorted_dets[] = "sorted_dets"; static const char __pyx_k_RuntimeError[] = "RuntimeError"; static const char __pyx_k_ndarray_is_not_C_contiguous[] = "ndarray is not C contiguous"; static const char __pyx_k_unknown_dtype_code_in_numpy_pxd[] = "unknown dtype code in numpy.pxd (%d)"; static const char __pyx_k_D_v_zix_caffe_caffe_win_20160523[] = "D:\\v-zix\\caffe\\caffe-win-20160523\\models\\py-faster-rcnn-windows\\lib\\nms\\gpu_nms.pyx"; static const char __pyx_k_Format_string_allocated_too_shor[] = "Format string allocated too short, see comment in numpy.pxd"; static const char __pyx_k_Non_native_byte_order_not_suppor[] = "Non-native byte order not supported"; static const char __pyx_k_ndarray_is_not_Fortran_contiguou[] = "ndarray is not Fortran contiguous"; static const char __pyx_k_Format_string_allocated_too_shor_2[] = "Format string allocated too short."; static PyObject *__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor; static PyObject *__pyx_kp_u_Format_string_allocated_too_shor_2; static PyObject *__pyx_kp_u_Non_native_byte_order_not_suppor; static PyObject *__pyx_n_s_RuntimeError; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_argsort; static PyObject *__pyx_n_s_boxes_dim; static PyObject *__pyx_n_s_boxes_num; static PyObject *__pyx_n_s_dets; static PyObject *__pyx_n_s_device_id; static PyObject *__pyx_n_s_dtype; static PyObject *__pyx_n_s_gpu_nms; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_int32; static PyObject *__pyx_n_s_keep; static PyObject *__pyx_n_s_main; static PyObject *__pyx_kp_u_ndarray_is_not_C_contiguous; static PyObject *__pyx_kp_u_ndarray_is_not_Fortran_contiguou; static PyObject *__pyx_n_s_nms_gpu_nms; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_num_out; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_order; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_scores; static PyObject *__pyx_n_s_sorted_dets; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thresh; static PyObject *__pyx_kp_u_unknown_dtype_code_in_numpy_pxd; static PyObject *__pyx_n_s_zeros; static PyObject 
*__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id); /* proto */ static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info); /* proto */ static PyObject *__pyx_int_4; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_slice_; static PyObject *__pyx_slice__3; static PyObject *__pyx_slice__4; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_codeobj__12; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_3nms_7gpu_nms_1gpu_nms = {"gpu_nms", (PyCFunction)__pyx_pw_3nms_7gpu_nms_1gpu_nms, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_3nms_7gpu_nms_1gpu_nms(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyArrayObject *__pyx_v_dets = 0; PyObject *__pyx_v_thresh = 0; __pyx_t_5numpy_int32_t __pyx_v_device_id; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("gpu_nms (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_dets,&__pyx_n_s_thresh,&__pyx_n_s_device_id,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dets)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_thresh)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, 1); __PYX_ERR(0, 16, __pyx_L3_error) } case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_device_id); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "gpu_nms") < 0)) __PYX_ERR(0, 16, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_dets = ((PyArrayObject *)values[0]); __pyx_v_thresh = ((PyObject*)values[1]); if (values[2]) { __pyx_v_device_id = __Pyx_PyInt_As_npy_int32(values[2]); if (unlikely((__pyx_v_device_id == (npy_int32)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error) } else { __pyx_v_device_id = ((__pyx_t_5numpy_int32_t)0); } } goto __pyx_L4_argument_unpacking_done; 
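/* A minimal sketch (added for illustration, guarded by #if 0 so it is never
 * compiled and does not change the generated module): how the CUDA entry point
 * wrapped by gpu_nms() could be driven directly from C. The prototype and the
 * argument order are taken from the gpu_nms.pyx fragments quoted in this file
 * ("void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int)" and the
 * call "_nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim,
 * thresh, device_id)"). The parameter meanings, the 5-column x1,y1,x2,y2,score
 * layout (the wrapper reads scores from column 4) and the helper name
 * example_direct_nms_call are assumptions; linking would additionally require
 * the project's CUDA NMS kernel object. */
#if 0  /* illustrative only -- never compiled */
/* Entry point as declared in gpu_nms.pyx (see the quoted header above). */
void _nms(__pyx_t_5numpy_int32_t*, int*, __pyx_t_5numpy_float32_t*, int, int, float, int);

static void example_direct_nms_call(int device_id)
{
    /* Two detections, already sorted by descending score, as the Python
       wrapper guarantees before it calls _nms. Columns are assumed to be
       x1, y1, x2, y2, score. */
    __pyx_t_5numpy_float32_t sorted_dets[2 * 5] = {
        0.f, 0.f, 10.f, 10.f, 0.9f,
        1.f, 1.f, 11.f, 11.f, 0.8f
    };
    __pyx_t_5numpy_int32_t keep[2];
    int num_out = 0;

    /* Same argument order as the generated call in the function body below. */
    _nms(&keep[0], &num_out, &sorted_dets[0], 2, 5, 0.7f, device_id);

    /* keep[0..num_out-1] now holds the row indices of sorted_dets that
       survive non-maximum suppression at the 0.7 overlap threshold. */
}
#endif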
__pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("gpu_nms", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 16, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_dets), __pyx_ptype_5numpy_ndarray, 1, "dets", 0))) __PYX_ERR(0, 16, __pyx_L1_error) if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_thresh), (&PyFloat_Type), 1, "thresh", 1))) __PYX_ERR(0, 16, __pyx_L1_error) __pyx_r = __pyx_pf_3nms_7gpu_nms_gpu_nms(__pyx_self, __pyx_v_dets, __pyx_v_thresh, __pyx_v_device_id); /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_3nms_7gpu_nms_gpu_nms(CYTHON_UNUSED PyObject *__pyx_self, PyArrayObject *__pyx_v_dets, PyObject *__pyx_v_thresh, __pyx_t_5numpy_int32_t __pyx_v_device_id) { int __pyx_v_boxes_num; int __pyx_v_boxes_dim; int __pyx_v_num_out; PyArrayObject *__pyx_v_keep = 0; PyArrayObject *__pyx_v_scores = 0; PyArrayObject *__pyx_v_order = 0; PyArrayObject *__pyx_v_sorted_dets = 0; __Pyx_LocalBuf_ND __pyx_pybuffernd_dets; __Pyx_Buffer __pyx_pybuffer_dets; __Pyx_LocalBuf_ND __pyx_pybuffernd_keep; __Pyx_Buffer __pyx_pybuffer_keep; __Pyx_LocalBuf_ND __pyx_pybuffernd_order; __Pyx_Buffer __pyx_pybuffer_order; __Pyx_LocalBuf_ND __pyx_pybuffernd_scores; __Pyx_Buffer __pyx_pybuffer_scores; __Pyx_LocalBuf_ND __pyx_pybuffernd_sorted_dets; __Pyx_Buffer __pyx_pybuffer_sorted_dets; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyArrayObject *__pyx_t_6 = NULL; PyArrayObject *__pyx_t_7 = NULL; PyArrayObject *__pyx_t_8 = NULL; PyArrayObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; Py_ssize_t __pyx_t_13; float __pyx_t_14; PyObject *__pyx_t_15 = NULL; PyObject *__pyx_t_16 = NULL; PyObject *__pyx_t_17 = NULL; __Pyx_RefNannySetupContext("gpu_nms", 0); __pyx_pybuffer_keep.pybuffer.buf = NULL; __pyx_pybuffer_keep.refcount = 0; __pyx_pybuffernd_keep.data = NULL; __pyx_pybuffernd_keep.rcbuffer = &__pyx_pybuffer_keep; __pyx_pybuffer_scores.pybuffer.buf = NULL; __pyx_pybuffer_scores.refcount = 0; __pyx_pybuffernd_scores.data = NULL; __pyx_pybuffernd_scores.rcbuffer = &__pyx_pybuffer_scores; __pyx_pybuffer_order.pybuffer.buf = NULL; __pyx_pybuffer_order.refcount = 0; __pyx_pybuffernd_order.data = NULL; __pyx_pybuffernd_order.rcbuffer = &__pyx_pybuffer_order; __pyx_pybuffer_sorted_dets.pybuffer.buf = NULL; __pyx_pybuffer_sorted_dets.refcount = 0; __pyx_pybuffernd_sorted_dets.data = NULL; __pyx_pybuffernd_sorted_dets.rcbuffer = &__pyx_pybuffer_sorted_dets; __pyx_pybuffer_dets.pybuffer.buf = NULL; __pyx_pybuffer_dets.refcount = 0; __pyx_pybuffernd_dets.data = NULL; __pyx_pybuffernd_dets.rcbuffer = &__pyx_pybuffer_dets; { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_dets.rcbuffer->pybuffer, (PyObject*)__pyx_v_dets, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) __PYX_ERR(0, 16, __pyx_L1_error) } __pyx_pybuffernd_dets.diminfo[0].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_dets.diminfo[0].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[0]; 
__pyx_pybuffernd_dets.diminfo[1].strides = __pyx_pybuffernd_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_dets.diminfo[1].shape = __pyx_pybuffernd_dets.rcbuffer->pybuffer.shape[1]; /* "nms/gpu_nms.pyx":18 * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] # <<<<<<<<<<<<<< * cdef int boxes_dim = dets.shape[1] * cdef int num_out */ __pyx_v_boxes_num = (__pyx_v_dets->dimensions[0]); /* "nms/gpu_nms.pyx":19 * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] * cdef int boxes_dim = dets.shape[1] # <<<<<<<<<<<<<< * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ */ __pyx_v_boxes_dim = (__pyx_v_dets->dimensions[1]); /* "nms/gpu_nms.pyx":22 * cdef int num_out * cdef np.ndarray[np.int32_t, ndim=1] \ * keep = np.zeros(boxes_num, dtype=np.int32) # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] */ __pyx_t_1 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_zeros); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_boxes_num); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_int32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 22, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 22, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_keep = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 21, __pyx_L1_error) } else {__pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; } } __pyx_t_6 = 0; __pyx_v_keep = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ 
__pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_tuple__2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 24, __pyx_L1_error) __pyx_t_7 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_scores.rcbuffer->pybuffer, (PyObject*)__pyx_t_7, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_scores = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_scores.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 23, __pyx_L1_error) } else {__pyx_pybuffernd_scores.diminfo[0].strides = __pyx_pybuffernd_scores.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_scores.diminfo[0].shape = __pyx_pybuffernd_scores.rcbuffer->pybuffer.shape[0]; } } __pyx_t_7 = 0; __pyx_v_scores = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_scores), __pyx_n_s_argsort); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = NULL; if (CYTHON_COMPILING_IN_CPYTHON && likely(PyMethod_Check(__pyx_t_1))) { __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_1); if (likely(__pyx_t_3)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_1, function); } } if (__pyx_t_3) { __pyx_t_5 = __Pyx_PyObject_CallOneArg(__pyx_t_1, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __pyx_t_5 = __Pyx_PyObject_CallNoArg(__pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 28, __pyx_L1_error) } __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = PyObject_GetItem(__pyx_t_5, __pyx_slice__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 28, __pyx_L1_error) __pyx_t_8 = ((PyArrayObject *)__pyx_t_1); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_order.rcbuffer->pybuffer, (PyObject*)__pyx_t_8, &__Pyx_TypeInfo_nn___pyx_t_5numpy_intp_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { __pyx_v_order = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_order.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 27, __pyx_L1_error) } else {__pyx_pybuffernd_order.diminfo[0].strides = __pyx_pybuffernd_order.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_order.diminfo[0].shape = __pyx_pybuffernd_order.rcbuffer->pybuffer.shape[0]; } } __pyx_t_8 = 0; __pyx_v_order = ((PyArrayObject *)__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)__pyx_v_order)); 
__Pyx_GIVEREF(((PyObject *)__pyx_v_order)); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)__pyx_v_order)); __Pyx_INCREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_slice__4); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_dets), __pyx_t_1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 30, __pyx_L1_error) __pyx_t_9 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer, (PyObject*)__pyx_t_9, &__Pyx_TypeInfo_nn___pyx_t_5numpy_float32_t, PyBUF_FORMAT| PyBUF_STRIDES, 2, 0, __pyx_stack) == -1)) { __pyx_v_sorted_dets = ((PyArrayObject *)Py_None); __Pyx_INCREF(Py_None); __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf = NULL; __PYX_ERR(0, 29, __pyx_L1_error) } else {__pyx_pybuffernd_sorted_dets.diminfo[0].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_sorted_dets.diminfo[0].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[0]; __pyx_pybuffernd_sorted_dets.diminfo[1].strides = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.strides[1]; __pyx_pybuffernd_sorted_dets.diminfo[1].shape = __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.shape[1]; } } __pyx_t_9 = 0; __pyx_v_sorted_dets = ((PyArrayObject *)__pyx_t_5); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":31 * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) # <<<<<<<<<<<<<< * keep = keep[:num_out] * return list(order[keep]) */ __pyx_t_10 = 0; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_pybuffernd_keep.diminfo[0].shape; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_pybuffernd_keep.diminfo[0].shape)) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_11 = -1; if (__pyx_t_12 < 0) { __pyx_t_12 += __pyx_pybuffernd_sorted_dets.diminfo[0].shape; if (unlikely(__pyx_t_12 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_12 >= __pyx_pybuffernd_sorted_dets.diminfo[0].shape)) __pyx_t_11 = 0; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_pybuffernd_sorted_dets.diminfo[1].shape; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 1; } else if (unlikely(__pyx_t_13 >= __pyx_pybuffernd_sorted_dets.diminfo[1].shape)) __pyx_t_11 = 1; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexError(__pyx_t_11); __PYX_ERR(0, 31, __pyx_L1_error) } __pyx_t_14 = __pyx_PyFloat_AsFloat(__pyx_v_thresh); if (unlikely((__pyx_t_14 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 31, __pyx_L1_error) _nms((&(*__Pyx_BufPtrStrided1d(__pyx_t_5numpy_int32_t *, __pyx_pybuffernd_keep.rcbuffer->pybuffer.buf, __pyx_t_10, __pyx_pybuffernd_keep.diminfo[0].strides))), (&__pyx_v_num_out), (&(*__Pyx_BufPtrStrided2d(__pyx_t_5numpy_float32_t *, __pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer.buf, __pyx_t_12, __pyx_pybuffernd_sorted_dets.diminfo[0].strides, __pyx_t_13, __pyx_pybuffernd_sorted_dets.diminfo[1].strides))), __pyx_v_boxes_num, __pyx_v_boxes_dim, __pyx_t_14, __pyx_v_device_id); /* "nms/gpu_nms.pyx":32 * sorted_dets = dets[order, :] * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = 
keep[:num_out] # <<<<<<<<<<<<<< * return list(order[keep]) */ __pyx_t_5 = __Pyx_PyObject_GetSlice(((PyObject *)__pyx_v_keep), 0, __pyx_v_num_out, NULL, NULL, NULL, 0, 1, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 32, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (!(likely(((__pyx_t_5) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_5, __pyx_ptype_5numpy_ndarray))))) __PYX_ERR(0, 32, __pyx_L1_error) __pyx_t_6 = ((PyArrayObject *)__pyx_t_5); { __Pyx_BufFmt_StackElem __pyx_stack[1]; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __pyx_t_11 = __Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_t_6, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack); if (unlikely(__pyx_t_11 < 0)) { PyErr_Fetch(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17); if (unlikely(__Pyx_GetBufferAndValidate(&__pyx_pybuffernd_keep.rcbuffer->pybuffer, (PyObject*)__pyx_v_keep, &__Pyx_TypeInfo_nn___pyx_t_5numpy_int32_t, PyBUF_FORMAT| PyBUF_STRIDES, 1, 0, __pyx_stack) == -1)) { Py_XDECREF(__pyx_t_15); Py_XDECREF(__pyx_t_16); Py_XDECREF(__pyx_t_17); __Pyx_RaiseBufferFallbackError(); } else { PyErr_Restore(__pyx_t_15, __pyx_t_16, __pyx_t_17); } } __pyx_pybuffernd_keep.diminfo[0].strides = __pyx_pybuffernd_keep.rcbuffer->pybuffer.strides[0]; __pyx_pybuffernd_keep.diminfo[0].shape = __pyx_pybuffernd_keep.rcbuffer->pybuffer.shape[0]; if (unlikely(__pyx_t_11 < 0)) __PYX_ERR(0, 32, __pyx_L1_error) } __pyx_t_6 = 0; __Pyx_DECREF_SET(__pyx_v_keep, ((PyArrayObject *)__pyx_t_5)); __pyx_t_5 = 0; /* "nms/gpu_nms.pyx":33 * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] * return list(order[keep]) # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_5 = PyObject_GetItem(((PyObject *)__pyx_v_order), ((PyObject *)__pyx_v_keep)); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PySequence_List(__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 33, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); { PyObject *__pyx_type, *__pyx_value, *__pyx_tb; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&__pyx_type, &__pyx_value, &__pyx_tb); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __Pyx_ErrRestore(__pyx_type, __pyx_value, __pyx_tb);} __Pyx_AddTraceback("nms.gpu_nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; goto __pyx_L2; __pyx_L0:; __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_dets.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_keep.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_order.rcbuffer->pybuffer); __Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_scores.rcbuffer->pybuffer); 
__Pyx_SafeReleaseBuffer(&__pyx_pybuffernd_sorted_dets.rcbuffer->pybuffer); __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_keep); __Pyx_XDECREF((PyObject *)__pyx_v_scores); __Pyx_XDECREF((PyObject *)__pyx_v_order); __Pyx_XDECREF((PyObject *)__pyx_v_sorted_dets); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. * def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_pw_5numpy_7ndarray_1__getbuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_pf_5numpy_7ndarray___getbuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_pf_5numpy_7ndarray___getbuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_copy_shape; int __pyx_v_i; int __pyx_v_ndim; int __pyx_v_endian_detector; int __pyx_v_little_endian; int __pyx_v_t; char *__pyx_v_f; PyArray_Descr *__pyx_v_descr = 0; int __pyx_v_offset; int __pyx_v_hasfields; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; int __pyx_t_5; PyObject *__pyx_t_6 = NULL; char *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":203 * # of flags * * if info == NULL: return # <<<<<<<<<<<<<< * * cdef int copy_shape, i, ndim */ __pyx_t_1 = ((__pyx_v_info == NULL) != 0); if (__pyx_t_1) { __pyx_r = 0; goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":206 * * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":207 * cdef int copy_shape, i, ndim * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * * ndim = PyArray_NDIM(self) */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":209 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * * ndim = PyArray_NDIM(self) # <<<<<<<<<<<<<< * * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_v_ndim = PyArray_NDIM(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":212 * * if sizeof(npy_intp) != sizeof(Py_ssize_t): * copy_shape = 1 # 
<<<<<<<<<<<<<< * else: * copy_shape = 0 */ __pyx_v_copy_shape = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":211 * ndim = PyArray_NDIM(self) * * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * copy_shape = 1 * else: */ goto __pyx_L4; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":214 * copy_shape = 1 * else: * copy_shape = 0 # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) */ /*else*/ { __pyx_v_copy_shape = 0; } __pyx_L4:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_C_CONTIGUOUS) == PyBUF_C_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L6_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":217 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not C contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_C_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L6_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 218, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":216 * copy_shape = 0 * * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ __pyx_t_2 = (((__pyx_v_flags & PyBUF_F_CONTIGUOUS) == PyBUF_F_CONTIGUOUS) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L9_bool_binop_done; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":221 * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): # <<<<<<<<<<<<<< * raise ValueError(u"ndarray is not Fortran contiguous") * */ __pyx_t_2 = ((!(PyArray_CHKFLAGS(__pyx_v_self, NPY_F_CONTIGUOUS) != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L9_bool_binop_done:; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 222, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":220 * raise ValueError(u"ndarray is not C contiguous") * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) # <<<<<<<<<<<<<< * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":224 * raise ValueError(u"ndarray is not Fortran contiguous") * * info.buf = PyArray_DATA(self) # <<<<<<<<<<<<<< * info.ndim = ndim * if copy_shape: */ __pyx_v_info->buf = PyArray_DATA(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":225 * * info.buf = PyArray_DATA(self) * info.ndim = ndim # <<<<<<<<<<<<<< * if copy_shape: * # Allocate new buffer for strides and shape info. */ __pyx_v_info->ndim = __pyx_v_ndim; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ __pyx_t_1 = (__pyx_v_copy_shape != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":229 * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) # <<<<<<<<<<<<<< * info.shape = info.strides + ndim * for i in range(ndim): */ __pyx_v_info->strides = ((Py_ssize_t *)malloc((((sizeof(Py_ssize_t)) * ((size_t)__pyx_v_ndim)) * 2))); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":230 * # This is allocated as one block, strides first. 
* info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim # <<<<<<<<<<<<<< * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] */ __pyx_v_info->shape = (__pyx_v_info->strides + __pyx_v_ndim); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":231 * info.strides = <Py_ssize_t*>stdlib.malloc(sizeof(Py_ssize_t) * <size_t>ndim * 2) * info.shape = info.strides + ndim * for i in range(ndim): # <<<<<<<<<<<<<< * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] */ __pyx_t_4 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":232 * info.shape = info.strides + ndim * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] # <<<<<<<<<<<<<< * info.shape[i] = PyArray_DIMS(self)[i] * else: */ (__pyx_v_info->strides[__pyx_v_i]) = (PyArray_STRIDES(__pyx_v_self)[__pyx_v_i]); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":233 * for i in range(ndim): * info.strides[i] = PyArray_STRIDES(self)[i] * info.shape[i] = PyArray_DIMS(self)[i] # <<<<<<<<<<<<<< * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) */ (__pyx_v_info->shape[__pyx_v_i]) = (PyArray_DIMS(__pyx_v_self)[__pyx_v_i]); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":226 * info.buf = PyArray_DATA(self) * info.ndim = ndim * if copy_shape: # <<<<<<<<<<<<<< * # Allocate new buffer for strides and shape info. * # This is allocated as one block, strides first. */ goto __pyx_L11; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":235 * info.shape[i] = PyArray_DIMS(self)[i] * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) # <<<<<<<<<<<<<< * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL */ /*else*/ { __pyx_v_info->strides = ((Py_ssize_t *)PyArray_STRIDES(__pyx_v_self)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":236 * else: * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) */ __pyx_v_info->shape = ((Py_ssize_t *)PyArray_DIMS(__pyx_v_self)); } __pyx_L11:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":237 * info.strides = <Py_ssize_t*>PyArray_STRIDES(self) * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) */ __pyx_v_info->suboffsets = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":238 * info.shape = <Py_ssize_t*>PyArray_DIMS(self) * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) # <<<<<<<<<<<<<< * info.readonly = not PyArray_ISWRITEABLE(self) * */ __pyx_v_info->itemsize = PyArray_ITEMSIZE(__pyx_v_self); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":239 * info.suboffsets = NULL * info.itemsize = PyArray_ITEMSIZE(self) * info.readonly = not PyArray_ISWRITEABLE(self) # <<<<<<<<<<<<<< * * cdef int t */ __pyx_v_info->readonly = (!(PyArray_ISWRITEABLE(__pyx_v_self) != 0)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":242 * * cdef int t * cdef char* f = NULL # <<<<<<<<<<<<<< * cdef dtype descr = self.descr * cdef int offset */ __pyx_v_f = NULL; /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":243 * cdef int t * cdef char* f = NULL * cdef dtype descr = self.descr # <<<<<<<<<<<<<< * cdef int offset * */ __pyx_t_3 = ((PyObject *)__pyx_v_self->descr); __Pyx_INCREF(__pyx_t_3); __pyx_v_descr = ((PyArray_Descr *)__pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":246 * cdef int offset * * cdef bint hasfields = PyDataType_HASFIELDS(descr) # <<<<<<<<<<<<<< * * if not hasfields and not copy_shape: */ __pyx_v_hasfields = PyDataType_HASFIELDS(__pyx_v_descr); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ __pyx_t_2 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L15_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_copy_shape != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L15_bool_binop_done:; if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":250 * if not hasfields and not copy_shape: * # do not call releasebuffer * info.obj = None # <<<<<<<<<<<<<< * else: * # need to call releasebuffer */ __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = Py_None; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":248 * cdef bint hasfields = PyDataType_HASFIELDS(descr) * * if not hasfields and not copy_shape: # <<<<<<<<<<<<<< * # do not call releasebuffer * info.obj = None */ goto __pyx_L14; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":253 * else: * # need to call releasebuffer * info.obj = self # <<<<<<<<<<<<<< * * if not hasfields: */ /*else*/ { __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); } __pyx_L14:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ __pyx_t_1 = ((!(__pyx_v_hasfields != 0)) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":256 * * if not hasfields: * t = descr.type_num # <<<<<<<<<<<<<< * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): */ __pyx_t_4 = __pyx_v_descr->type_num; __pyx_v_t = __pyx_t_4; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_2 = ((__pyx_v_descr->byteorder == '>') != 0); if (!__pyx_t_2) { goto __pyx_L20_next_or; } else { } __pyx_t_2 = (__pyx_v_little_endian != 0); if (!__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_L20_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":258 * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" */ __pyx_t_2 = ((__pyx_v_descr->byteorder 
== '<') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L19_bool_binop_done; } __pyx_t_2 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_1 = __pyx_t_2; __pyx_L19_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 259, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":257 * if not hasfields: * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":260 * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" */ switch (__pyx_v_t) { case NPY_BYTE: __pyx_v_f = ((char *)"b"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":261 * raise ValueError(u"Non-native byte order not supported") * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" */ case NPY_UBYTE: __pyx_v_f = ((char *)"B"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":262 * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" */ case NPY_SHORT: __pyx_v_f = ((char *)"h"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":263 * elif t == NPY_UBYTE: f = "B" * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" */ case NPY_USHORT: __pyx_v_f = ((char *)"H"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":264 * elif t == NPY_SHORT: f = "h" * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" */ case NPY_INT: __pyx_v_f = ((char *)"i"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":265 * elif t == NPY_USHORT: f = "H" * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" */ case NPY_UINT: __pyx_v_f = ((char *)"I"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":266 * elif t == NPY_INT: f = "i" * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" */ case NPY_LONG: 
__pyx_v_f = ((char *)"l"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":267 * elif t == NPY_UINT: f = "I" * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" */ case NPY_ULONG: __pyx_v_f = ((char *)"L"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":268 * elif t == NPY_LONG: f = "l" * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" */ case NPY_LONGLONG: __pyx_v_f = ((char *)"q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":269 * elif t == NPY_ULONG: f = "L" * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" */ case NPY_ULONGLONG: __pyx_v_f = ((char *)"Q"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":270 * elif t == NPY_LONGLONG: f = "q" * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" */ case NPY_FLOAT: __pyx_v_f = ((char *)"f"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":271 * elif t == NPY_ULONGLONG: f = "Q" * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" */ case NPY_DOUBLE: __pyx_v_f = ((char *)"d"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":272 * elif t == NPY_FLOAT: f = "f" * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" */ case NPY_LONGDOUBLE: __pyx_v_f = ((char *)"g"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":273 * elif t == NPY_DOUBLE: f = "d" * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" */ case NPY_CFLOAT: __pyx_v_f = ((char *)"Zf"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":274 * elif t == NPY_LONGDOUBLE: f = "g" * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" */ case NPY_CDOUBLE: __pyx_v_f = ((char *)"Zd"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":275 * elif t == NPY_CFLOAT: f = "Zf" * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f = "O" * else: */ case NPY_CLONGDOUBLE: __pyx_v_f = ((char *)"Zg"); break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":276 * elif t == NPY_CDOUBLE: f = "Zd" * elif t == NPY_CLONGDOUBLE: f = "Zg" * elif t == NPY_OBJECT: f = "O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ case NPY_OBJECT: __pyx_v_f = ((char *)"O"); break; default: /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":278 * elif t == NPY_OBJECT: f = "O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * info.format = f * return */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_6 = 
PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 278, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 278, __pyx_L1_error) break; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":279 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f # <<<<<<<<<<<<<< * return * else: */ __pyx_v_info->format = __pyx_v_f; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":280 * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * info.format = f * return # <<<<<<<<<<<<<< * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) */ __pyx_r = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":255 * info.obj = self * * if not hasfields: # <<<<<<<<<<<<<< * t = descr.type_num * if ((descr.byteorder == c'>' and little_endian) or */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":282 * return * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) # <<<<<<<<<<<<<< * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 */ /*else*/ { __pyx_v_info->format = ((char *)malloc(0xFF)); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":283 * else: * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment # <<<<<<<<<<<<<< * offset = 0 * f = _util_dtypestring(descr, info.format + 1, */ (__pyx_v_info->format[0]) = '^'; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":284 * info.format = <char*>stdlib.malloc(_buffer_format_string_len) * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 # <<<<<<<<<<<<<< * f = _util_dtypestring(descr, info.format + 1, * info.format + _buffer_format_string_len, */ __pyx_v_offset = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":285 * info.format[0] = c'^' # Native data types, manual alignment * offset = 0 * f = _util_dtypestring(descr, info.format + 1, # <<<<<<<<<<<<<< * info.format + _buffer_format_string_len, * &offset) */ __pyx_t_7 = __pyx_f_5numpy__util_dtypestring(__pyx_v_descr, (__pyx_v_info->format + 1), (__pyx_v_info->format + 0xFF), (&__pyx_v_offset)); if (unlikely(__pyx_t_7 == NULL)) __PYX_ERR(1, 285, __pyx_L1_error) __pyx_v_f = __pyx_t_7; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":288 * info.format + _buffer_format_string_len, * &offset) * f[0] = c'\0' # Terminate format string # <<<<<<<<<<<<<< * * def __releasebuffer__(ndarray self, Py_buffer* info): */ (__pyx_v_f[0]) = '\x00'; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":197 * # experimental exception made for __getbuffer__ and __releasebuffer__ * # -- the details of this may change. 
* def __getbuffer__(ndarray self, Py_buffer* info, int flags): # <<<<<<<<<<<<<< * # This implementation of getbuffer is geared towards Cython * # requirements, and does not yet fullfill the PEP. */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("numpy.ndarray.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_XDECREF((PyObject *)__pyx_v_descr); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* Python wrapper */ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info); /*proto*/ static CYTHON_UNUSED void __pyx_pw_5numpy_7ndarray_3__releasebuffer__(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__releasebuffer__ (wrapper)", 0); __pyx_pf_5numpy_7ndarray_2__releasebuffer__(((PyArrayObject *)__pyx_v_self), ((Py_buffer *)__pyx_v_info)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_pf_5numpy_7ndarray_2__releasebuffer__(PyArrayObject *__pyx_v_self, Py_buffer *__pyx_v_info) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__releasebuffer__", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ __pyx_t_1 = (PyArray_HASFIELDS(__pyx_v_self) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":292 * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): * stdlib.free(info.format) # <<<<<<<<<<<<<< * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) */ free(__pyx_v_info->format); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":291 * * def __releasebuffer__(ndarray self, Py_buffer* info): * if PyArray_HASFIELDS(self): # <<<<<<<<<<<<<< * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # <<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ __pyx_t_1 = (((sizeof(npy_intp)) != (sizeof(Py_ssize_t))) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":294 * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): * stdlib.free(info.strides) # <<<<<<<<<<<<<< * # info.shape was stored after info.strides in the same block * */ free(__pyx_v_info->strides); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":293 * if PyArray_HASFIELDS(self): * stdlib.free(info.format) * if sizeof(npy_intp) != sizeof(Py_ssize_t): # 
<<<<<<<<<<<<<< * stdlib.free(info.strides) * # info.shape was stored after info.strides in the same block */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":290 * f[0] = c'\0' # Terminate format string * * def __releasebuffer__(ndarray self, Py_buffer* info): # <<<<<<<<<<<<<< * if PyArray_HASFIELDS(self): * stdlib.free(info.format) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":771 * * cdef inline object PyArray_MultiIterNew1(a): * return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew2(a, b): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 771, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":770 * ctypedef npy_cdouble complex_t * * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(1, <void*>a) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":774 * * cdef inline object PyArray_MultiIterNew2(a, b): * return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew3(a, b, c): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 774, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":773 * return PyArray_MultiIterNew(1, <void*>a) * * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, 
<void*>b, <void*> c) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":777 * * cdef inline object PyArray_MultiIterNew3(a, b, c): * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 777, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":776 * return PyArray_MultiIterNew(2, <void*>a, <void*>b) * * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":780 * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<< * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 780, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":779 * return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) * * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject 
*__pyx_v_e) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":783 * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<< * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 783, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":782 * return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) * * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. */ static CYTHON_INLINE char *__pyx_f_5numpy__util_dtypestring(PyArray_Descr *__pyx_v_descr, char *__pyx_v_f, char *__pyx_v_end, int *__pyx_v_offset) { PyArray_Descr *__pyx_v_child = 0; int __pyx_v_endian_detector; int __pyx_v_little_endian; PyObject *__pyx_v_fields = 0; PyObject *__pyx_v_childname = NULL; PyObject *__pyx_v_new_offset = NULL; PyObject *__pyx_v_t = NULL; char *__pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; int __pyx_t_5; int __pyx_t_6; int __pyx_t_7; long __pyx_t_8; char *__pyx_t_9; __Pyx_RefNannySetupContext("_util_dtypestring", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":790 * * cdef dtype child * cdef int endian_detector = 1 # <<<<<<<<<<<<<< * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) * cdef tuple fields */ __pyx_v_endian_detector = 1; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":791 * cdef dtype child * cdef int endian_detector = 1 * cdef bint little_endian = ((<char*>&endian_detector)[0] != 0) # <<<<<<<<<<<<<< * cdef tuple fields * */ __pyx_v_little_endian = ((((char *)(&__pyx_v_endian_detector))[0]) != 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ if (unlikely(__pyx_v_descr->names == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); __PYX_ERR(1, 794, __pyx_L1_error) } __pyx_t_1 = __pyx_v_descr->names; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0; for (;;) { if (__pyx_t_2 >= PyTuple_GET_SIZE(__pyx_t_1)) break; #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = 
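/* _util_dtypestring walks descr.names of a structured dtype: for each
   field it checks that the remaining format buffer is large enough and
   that the byte order is native, emits 'x' pad bytes up to the field
   offset, then either writes the scalar format code or recurses into a
   nested structured child, returning the new write position in f. */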
PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(1, 794, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 794, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_XDECREF_SET(__pyx_v_childname, __pyx_t_3); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":795 * * for childname in descr.names: * fields = descr.fields[childname] # <<<<<<<<<<<<<< * child, new_offset = fields * */ if (unlikely(__pyx_v_descr->fields == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 795, __pyx_L1_error) } __pyx_t_3 = __Pyx_PyDict_GetItem(__pyx_v_descr->fields, __pyx_v_childname); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(PyTuple_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(1, 795, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_fields, ((PyObject*)__pyx_t_3)); __pyx_t_3 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":796 * for childname in descr.names: * fields = descr.fields[childname] * child, new_offset = fields # <<<<<<<<<<<<<< * * if (end - f) - <int>(new_offset - offset[0]) < 15: */ if (likely(__pyx_v_fields != Py_None)) { PyObject* sequence = __pyx_v_fields; #if CYTHON_COMPILING_IN_CPYTHON Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 796, __pyx_L1_error) } #if CYTHON_COMPILING_IN_CPYTHON __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); #else __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); #endif } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 796, __pyx_L1_error) } if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_ptype_5numpy_dtype))))) __PYX_ERR(1, 796, __pyx_L1_error) __Pyx_XDECREF_SET(__pyx_v_child, ((PyArray_Descr *)__pyx_t_3)); __pyx_t_3 = 0; __Pyx_XDECREF_SET(__pyx_v_new_offset, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ __pyx_t_4 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyNumber_Subtract(__pyx_v_new_offset, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 798, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = ((((__pyx_v_end - __pyx_v_f) - ((int)__pyx_t_5)) < 15) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - 
offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 799, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":798 * child, new_offset = fields * * if (end - f) - <int>(new_offset - offset[0]) < 15: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ __pyx_t_7 = ((__pyx_v_child->byteorder == '>') != 0); if (!__pyx_t_7) { goto __pyx_L8_next_or; } else { } __pyx_t_7 = (__pyx_v_little_endian != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_L8_next_or:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":802 * * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): # <<<<<<<<<<<<<< * raise ValueError(u"Non-native byte order not supported") * # One could encode it in the format string and have Cython */ __pyx_t_7 = ((__pyx_v_child->byteorder == '<') != 0); if (__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L7_bool_binop_done; } __pyx_t_7 = ((!(__pyx_v_little_endian != 0)) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L7_bool_binop_done:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 803, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":801 * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") * * if ((child.byteorder == c'>' and little_endian) or # <<<<<<<<<<<<<< * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":813 * * # Output padding bytes * while offset[0] < new_offset: # <<<<<<<<<<<<<< * f[0] = 120 # "x"; pad byte * f += 1 */ while (1) { __pyx_t_3 = __Pyx_PyInt_From_int((__pyx_v_offset[0])); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 813, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_t_3, __pyx_v_new_offset, Py_LT); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 813, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (!__pyx_t_6) break; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":814 * # Output padding bytes * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte # <<<<<<<<<<<<<< * f += 1 * offset[0] += 1 */ (__pyx_v_f[0]) = 0x78; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":815 * while offset[0] < new_offset: * f[0] = 120 # "x"; pad byte * f += 1 # <<<<<<<<<<<<<< * offset[0] += 1 * */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":816 * f[0] = 120 # "x"; pad byte * f += 1 * offset[0] += 1 # <<<<<<<<<<<<<< * * offset[0] += child.itemsize */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + 1); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":818 * offset[0] += 1 * * offset[0] += child.itemsize # <<<<<<<<<<<<<< * * if not PyDataType_HASFIELDS(child): */ __pyx_t_8 = 0; (__pyx_v_offset[__pyx_t_8]) = ((__pyx_v_offset[__pyx_t_8]) + __pyx_v_child->elsize); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ __pyx_t_6 = ((!(PyDataType_HASFIELDS(__pyx_v_child) != 0)) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":821 * * if not PyDataType_HASFIELDS(child): * t = child.type_num # <<<<<<<<<<<<<< * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") */ __pyx_t_4 = __Pyx_PyInt_From_int(__pyx_v_child->type_num); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 821, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_XDECREF_SET(__pyx_v_t, __pyx_t_4); __pyx_t_4 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ __pyx_t_6 = (((__pyx_v_end - __pyx_v_f) < 5) != 0); if (__pyx_t_6) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * * # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 823, __pyx_L1_error) /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":822 * if not PyDataType_HASFIELDS(child): * t = child.type_num * if end - f < 5: # <<<<<<<<<<<<<< * raise RuntimeError(u"Format string allocated too short.") * */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":826 * * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" # <<<<<<<<<<<<<< * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_BYTE); if (unlikely(!__pyx_t_4)) 
__PYX_ERR(1, 826, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 826, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 98; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":827 * # Until ticket #99 is fixed, use integers to avoid warnings * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" # <<<<<<<<<<<<<< * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UBYTE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 827, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":828 * if t == NPY_BYTE: f[0] = 98 #"b" * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" # <<<<<<<<<<<<<< * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_SHORT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 828, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x68; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":829 * elif t == NPY_UBYTE: f[0] = 66 #"B" * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" # <<<<<<<<<<<<<< * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_USHORT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 829, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 72; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":830 * elif t == NPY_SHORT: f[0] = 104 #"h" * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" # <<<<<<<<<<<<<< * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_INT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; 
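/* The comparisons below store the format characters as integer ASCII
   codes (98='b', 66='B', 104='h', 72='H', 105='i', 73='I', 108='l',
   76='L', 113='q', 81='Q', 102='f', 100='d', 103='g', 79='O'), per the
   "use integers to avoid warnings" note in the embedded pxd source. */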
__pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 830, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x69; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":831 * elif t == NPY_USHORT: f[0] = 72 #"H" * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" # <<<<<<<<<<<<<< * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_UINT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 831, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 73; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":832 * elif t == NPY_INT: f[0] = 105 #"i" * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" # <<<<<<<<<<<<<< * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 832, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x6C; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":833 * elif t == NPY_UINT: f[0] = 73 #"I" * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" # <<<<<<<<<<<<<< * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 833, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 76; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":834 * elif t == NPY_LONG: f[0] = 108 #"l" * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" # <<<<<<<<<<<<<< * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGLONG); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 834, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x71; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":835 * elif t == NPY_ULONG: f[0] = 76 #"L" * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" # <<<<<<<<<<<<<< * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_ULONGLONG); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 835, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 81; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":836 * elif t == NPY_LONGLONG: f[0] = 113 #"q" * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" # <<<<<<<<<<<<<< * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_FLOAT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 836, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x66; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":837 * elif t == NPY_ULONGLONG: f[0] = 81 #"Q" * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" # <<<<<<<<<<<<<< * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_DOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 837, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x64; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":838 * elif t == NPY_FLOAT: f[0] = 102 #"f" * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" # <<<<<<<<<<<<<< * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_LONGDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 838, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 0x67; goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":839 * elif t == NPY_DOUBLE: f[0] = 100 #"d" * elif t == NPY_LONGDOUBLE: 
f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf # <<<<<<<<<<<<<< * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CFLOAT); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 839, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x66; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":840 * elif t == NPY_LONGDOUBLE: f[0] = 103 #"g" * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd # <<<<<<<<<<<<<< * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CDOUBLE); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 840, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x64; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":841 * elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg # <<<<<<<<<<<<<< * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: */ __pyx_t_3 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_CLONGDOUBLE); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyObject_RichCompare(__pyx_v_t, __pyx_t_3, Py_EQ); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 841, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 90; (__pyx_v_f[1]) = 0x67; __pyx_v_f = (__pyx_v_f + 1); goto __pyx_L15; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":842 * elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd * elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg * elif t == NPY_OBJECT: f[0] = 79 #"O" # <<<<<<<<<<<<<< * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) */ __pyx_t_4 = __Pyx_PyInt_From_enum__NPY_TYPES(NPY_OBJECT); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = PyObject_RichCompare(__pyx_v_t, __pyx_t_4, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(1, 842, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; if (__pyx_t_6) { (__pyx_v_f[0]) = 79; goto __pyx_L15; } /* 
"C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":844 * elif t == NPY_OBJECT: f[0] = 79 #"O" * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) # <<<<<<<<<<<<<< * f += 1 * else: */ /*else*/ { __pyx_t_3 = PyUnicode_Format(__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_v_t); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 844, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 844, __pyx_L1_error) } __pyx_L15:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":845 * else: * raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t) * f += 1 # <<<<<<<<<<<<<< * else: * # Cython ignores struct boundary information ("T{...}"), */ __pyx_v_f = (__pyx_v_f + 1); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":820 * offset[0] += child.itemsize * * if not PyDataType_HASFIELDS(child): # <<<<<<<<<<<<<< * t = child.type_num * if end - f < 5: */ goto __pyx_L13; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":849 * # Cython ignores struct boundary information ("T{...}"), * # so don't output it * f = _util_dtypestring(child, f, end, offset) # <<<<<<<<<<<<<< * return f * */ /*else*/ { __pyx_t_9 = __pyx_f_5numpy__util_dtypestring(__pyx_v_child, __pyx_v_f, __pyx_v_end, __pyx_v_offset); if (unlikely(__pyx_t_9 == NULL)) __PYX_ERR(1, 849, __pyx_L1_error) __pyx_v_f = __pyx_t_9; } __pyx_L13:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":794 * cdef tuple fields * * for childname in descr.names: # <<<<<<<<<<<<<< * fields = descr.fields[childname] * child, new_offset = fields */ } __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":850 * # so don't output it * f = _util_dtypestring(child, f, end, offset) * return f # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_f; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":785 * return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) * * cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL: # <<<<<<<<<<<<<< * # Recursive utility function used in __getbuffer__ to get format * # string. The new location in the format string is returned. 
*/ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("numpy._util_dtypestring", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_child); __Pyx_XDECREF(__pyx_v_fields); __Pyx_XDECREF(__pyx_v_childname); __Pyx_XDECREF(__pyx_v_new_offset); __Pyx_XDECREF(__pyx_v_t); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { PyObject *__pyx_v_baseptr; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; __Pyx_RefNannySetupContext("set_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ __pyx_t_1 = (__pyx_v_base == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":969 * cdef PyObject* baseptr * if base is None: * baseptr = NULL # <<<<<<<<<<<<<< * else: * Py_INCREF(base) # important to do this before decref below! */ __pyx_v_baseptr = NULL; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":968 * cdef inline void set_array_base(ndarray arr, object base): * cdef PyObject* baseptr * if base is None: # <<<<<<<<<<<<<< * baseptr = NULL * else: */ goto __pyx_L3; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":971 * baseptr = NULL * else: * Py_INCREF(base) # important to do this before decref below! # <<<<<<<<<<<<<< * baseptr = <PyObject*>base * Py_XDECREF(arr.base) */ /*else*/ { Py_INCREF(__pyx_v_base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":972 * else: * Py_INCREF(base) # important to do this before decref below! * baseptr = <PyObject*>base # <<<<<<<<<<<<<< * Py_XDECREF(arr.base) * arr.base = baseptr */ __pyx_v_baseptr = ((PyObject *)__pyx_v_base); } __pyx_L3:; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":973 * Py_INCREF(base) # important to do this before decref below! 
* baseptr = <PyObject*>base * Py_XDECREF(arr.base) # <<<<<<<<<<<<<< * arr.base = baseptr * */ Py_XDECREF(__pyx_v_arr->base); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":974 * baseptr = <PyObject*>base * Py_XDECREF(arr.base) * arr.base = baseptr # <<<<<<<<<<<<<< * * cdef inline object get_array_base(ndarray arr): */ __pyx_v_arr->base = __pyx_v_baseptr; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":966 * * * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< * cdef PyObject* baseptr * if base is None: */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("get_array_base", 0); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ __pyx_t_1 = ((__pyx_v_arr->base == NULL) != 0); if (__pyx_t_1) { /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":978 * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: * return None # <<<<<<<<<<<<<< * else: * return <object>arr.base */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":977 * * cdef inline object get_array_base(ndarray arr): * if arr.base is NULL: # <<<<<<<<<<<<<< * return None * else: */ } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":980 * return None * else: * return <object>arr.base # <<<<<<<<<<<<<< */ /*else*/ { __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_arr->base)); __pyx_r = ((PyObject *)__pyx_v_arr->base); goto __pyx_L0; } /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef __pyx_moduledef = { #if PY_VERSION_HEX < 0x03020000 { PyObject_HEAD_INIT(NULL) NULL, 0, NULL }, #else PyModuleDef_HEAD_INIT, #endif "gpu_nms", 0, /* m_doc */ -1, /* m_size */ __pyx_methods /* m_methods */, NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_k_D_v_zix_caffe_caffe_win_20160523, sizeof(__pyx_k_D_v_zix_caffe_caffe_win_20160523), 0, 0, 1, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor, __pyx_k_Format_string_allocated_too_shor, sizeof(__pyx_k_Format_string_allocated_too_shor), 0, 1, 0, 0}, {&__pyx_kp_u_Format_string_allocated_too_shor_2, __pyx_k_Format_string_allocated_too_shor_2, sizeof(__pyx_k_Format_string_allocated_too_shor_2), 0, 1, 0, 0}, {&__pyx_kp_u_Non_native_byte_order_not_suppor, __pyx_k_Non_native_byte_order_not_suppor, sizeof(__pyx_k_Non_native_byte_order_not_suppor), 0, 1, 0, 0}, {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 
0, 1, 1}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_argsort, __pyx_k_argsort, sizeof(__pyx_k_argsort), 0, 0, 1, 1}, {&__pyx_n_s_boxes_dim, __pyx_k_boxes_dim, sizeof(__pyx_k_boxes_dim), 0, 0, 1, 1}, {&__pyx_n_s_boxes_num, __pyx_k_boxes_num, sizeof(__pyx_k_boxes_num), 0, 0, 1, 1}, {&__pyx_n_s_dets, __pyx_k_dets, sizeof(__pyx_k_dets), 0, 0, 1, 1}, {&__pyx_n_s_device_id, __pyx_k_device_id, sizeof(__pyx_k_device_id), 0, 0, 1, 1}, {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, {&__pyx_n_s_gpu_nms, __pyx_k_gpu_nms, sizeof(__pyx_k_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_int32, __pyx_k_int32, sizeof(__pyx_k_int32), 0, 0, 1, 1}, {&__pyx_n_s_keep, __pyx_k_keep, sizeof(__pyx_k_keep), 0, 0, 1, 1}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_kp_u_ndarray_is_not_C_contiguous, __pyx_k_ndarray_is_not_C_contiguous, sizeof(__pyx_k_ndarray_is_not_C_contiguous), 0, 1, 0, 0}, {&__pyx_kp_u_ndarray_is_not_Fortran_contiguou, __pyx_k_ndarray_is_not_Fortran_contiguou, sizeof(__pyx_k_ndarray_is_not_Fortran_contiguou), 0, 1, 0, 0}, {&__pyx_n_s_nms_gpu_nms, __pyx_k_nms_gpu_nms, sizeof(__pyx_k_nms_gpu_nms), 0, 0, 1, 1}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_num_out, __pyx_k_num_out, sizeof(__pyx_k_num_out), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_order, __pyx_k_order, sizeof(__pyx_k_order), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_scores, __pyx_k_scores, sizeof(__pyx_k_scores), 0, 0, 1, 1}, {&__pyx_n_s_sorted_dets, __pyx_k_sorted_dets, sizeof(__pyx_k_sorted_dets), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thresh, __pyx_k_thresh, sizeof(__pyx_k_thresh), 0, 0, 1, 1}, {&__pyx_kp_u_unknown_dtype_code_in_numpy_pxd, __pyx_k_unknown_dtype_code_in_numpy_pxd, sizeof(__pyx_k_unknown_dtype_code_in_numpy_pxd), 0, 1, 0, 0}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 218, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 231, __pyx_L1_error) __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(1, 799, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "nms/gpu_nms.pyx":24 * keep = np.zeros(boxes_num, dtype=np.int32) * cdef np.ndarray[np.float32_t, ndim=1] \ * scores = dets[:, 4] # <<<<<<<<<<<<<< * #cdef np.ndarray[np.int_t, ndim=1] \ // 20160601, by xzn * # order = scores.argsort()[::-1] */ __pyx_slice_ = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice_)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice_); __Pyx_GIVEREF(__pyx_slice_); __pyx_tuple__2 = PyTuple_Pack(2, __pyx_slice_, __pyx_int_4); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 24, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "nms/gpu_nms.pyx":28 * # order = scores.argsort()[::-1] * cdef np.ndarray[np.intp_t, ndim=1] \ * order = scores.argsort()[::-1] # <<<<<<<<<<<<<< * cdef 
np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] */ __pyx_slice__3 = PySlice_New(Py_None, Py_None, __pyx_int_neg_1); if (unlikely(!__pyx_slice__3)) __PYX_ERR(0, 28, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__3); __Pyx_GIVEREF(__pyx_slice__3); /* "nms/gpu_nms.pyx":30 * order = scores.argsort()[::-1] * cdef np.ndarray[np.float32_t, ndim=2] \ * sorted_dets = dets[order, :] # <<<<<<<<<<<<<< * _nms(&keep[0], &num_out, &sorted_dets[0, 0], boxes_num, boxes_dim, thresh, device_id) * keep = keep[:num_out] */ __pyx_slice__4 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__4)) __PYX_ERR(0, 30, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__4); __Pyx_GIVEREF(__pyx_slice__4); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":218 * if ((flags & pybuf.PyBUF_C_CONTIGUOUS == pybuf.PyBUF_C_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_C_CONTIGUOUS)): * raise ValueError(u"ndarray is not C contiguous") # <<<<<<<<<<<<<< * * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_C_contiguous); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 218, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":222 * if ((flags & pybuf.PyBUF_F_CONTIGUOUS == pybuf.PyBUF_F_CONTIGUOUS) * and not PyArray_CHKFLAGS(self, NPY_F_CONTIGUOUS)): * raise ValueError(u"ndarray is not Fortran contiguous") # <<<<<<<<<<<<<< * * info.buf = PyArray_DATA(self) */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_ndarray_is_not_Fortran_contiguou); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 222, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":259 * if ((descr.byteorder == c'>' and little_endian) or * (descr.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * if t == NPY_BYTE: f = "b" * elif t == NPY_UBYTE: f = "B" */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 259, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":799 * * if (end - f) - <int>(new_offset - offset[0]) < 15: * raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd") # <<<<<<<<<<<<<< * * if ((child.byteorder == c'>' and little_endian) or */ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 799, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":803 * if ((child.byteorder == c'>' and little_endian) or * (child.byteorder == c'<' and not little_endian)): * raise ValueError(u"Non-native byte order not supported") # <<<<<<<<<<<<<< * # One could encode it in the format string and have Cython * # complain instead, BUT: < and > in format strings also imply */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_Non_native_byte_order_not_suppor); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 803, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":823 * t = child.type_num * if end - f < 5: * raise RuntimeError(u"Format string allocated too short.") # <<<<<<<<<<<<<< * 
* # Until ticket #99 is fixed, use integers to avoid warnings */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_Format_string_allocated_too_shor_2); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 823, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_tuple__11 = PyTuple_Pack(10, __pyx_n_s_dets, __pyx_n_s_thresh, __pyx_n_s_device_id, __pyx_n_s_boxes_num, __pyx_n_s_boxes_dim, __pyx_n_s_num_out, __pyx_n_s_keep, __pyx_n_s_scores, __pyx_n_s_order, __pyx_n_s_sorted_dets); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_GIVEREF(__pyx_tuple__11); __pyx_codeobj__12 = (PyObject*)__Pyx_PyCode_New(3, 0, 10, 0, 0, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__11, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_D_v_zix_caffe_caffe_win_20160523, __pyx_n_s_gpu_nms, 16, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__12)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initgpu_nms(void); /*proto*/ PyMODINIT_FUNC initgpu_nms(void) #else PyMODINIT_FUNC PyInit_gpu_nms(void); /*proto*/ PyMODINIT_FUNC PyInit_gpu_nms(void) #endif { PyObject *__pyx_t_1 = NULL; __Pyx_RefNannyDeclarations #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_gpu_nms(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? 
*/ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("gpu_nms", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. ---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_nms__gpu_nms) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "nms.gpu_nms")) { if (unlikely(PyDict_SetItemString(modules, "nms.gpu_nms", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ /*--- Type import code ---*/ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__Pyx_BUILTIN_MODULE_NAME, "type", #if CYTHON_COMPILING_IN_PYPY sizeof(PyTypeObject), #else sizeof(PyHeapTypeObject), #endif 0); if (unlikely(!__pyx_ptype_7cpython_4type_type)) __PYX_ERR(2, 9, __pyx_L1_error) __pyx_ptype_5numpy_dtype = __Pyx_ImportType("numpy", "dtype", sizeof(PyArray_Descr), 0); if (unlikely(!__pyx_ptype_5numpy_dtype)) __PYX_ERR(1, 155, __pyx_L1_error) __pyx_ptype_5numpy_flatiter = __Pyx_ImportType("numpy", "flatiter", sizeof(PyArrayIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_flatiter)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_ptype_5numpy_broadcast = __Pyx_ImportType("numpy", "broadcast", sizeof(PyArrayMultiIterObject), 0); if (unlikely(!__pyx_ptype_5numpy_broadcast)) __PYX_ERR(1, 172, __pyx_L1_error) __pyx_ptype_5numpy_ndarray = __Pyx_ImportType("numpy", "ndarray", sizeof(PyArrayObject), 0); if (unlikely(!__pyx_ptype_5numpy_ndarray)) __PYX_ERR(1, 181, __pyx_L1_error) __pyx_ptype_5numpy_ufunc = __Pyx_ImportType("numpy", "ufunc", sizeof(PyUFuncObject), 0); if (unlikely(!__pyx_ptype_5numpy_ufunc)) __PYX_ERR(1, 861, __pyx_L1_error) /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "nms/gpu_nms.pyx":8 * # -------------------------------------------------------- * * import numpy as np # <<<<<<<<<<<<<< * cimport numpy as np * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); 
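/* The execution code in this init function runs the top level of nms/gpu_nms.pyx:
 * the `import numpy as np` handled just above, the sizeof(int) == sizeof(np.int32_t)
 * assert, and the binding of the `gpu_nms` def into the module dict further below.
 * Illustrative Python-side usage (a sketch, not produced by Cython; the (N, 5)
 * float32 layout with the score in column 4 is inferred from the
 * `scores = dets[:, 4]` line of the .pyx quoted in this file):
 *
 *     import numpy as np
 *     from nms.gpu_nms import gpu_nms
 *     dets = np.array([[0, 0, 10, 10, 0.9],
 *                      [1, 1, 11, 11, 0.8]], dtype=np.float32)
 *     keep = gpu_nms(dets, 0.3, device_id=0)  # indices of the boxes to keep
 *
 * The suppression itself is done by _nms(), declared in "gpu_nms.hpp" and
 * presumably implemented in the accompanying GPU kernel source.
 */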
__pyx_t_1 = 0; /* "nms/gpu_nms.pyx":11 * cimport numpy as np * * assert sizeof(int) == sizeof(np.int32_t) # <<<<<<<<<<<<<< * * cdef extern from "gpu_nms.hpp": */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!(((sizeof(int)) == (sizeof(__pyx_t_5numpy_int32_t))) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(0, 11, __pyx_L1_error) } } #endif /* "nms/gpu_nms.pyx":16 * void _nms(np.int32_t*, int*, np.float32_t*, int, int, float, int) * * def gpu_nms(np.ndarray[np.float32_t, ndim=2] dets, np.float thresh, # <<<<<<<<<<<<<< * np.int32_t device_id=0): * cdef int boxes_num = dets.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_3nms_7gpu_nms_1gpu_nms, NULL, __pyx_n_s_nms_gpu_nms); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_gpu_nms, __pyx_t_1) < 0) __PYX_ERR(0, 16, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "nms/gpu_nms.pyx":1 * # -------------------------------------------------------- # <<<<<<<<<<<<<< * # Faster R-CNN * # Copyright (c) 2015 Microsoft */ __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "C:/Anaconda2/lib/site-packages/Cython/Includes/numpy/__init__.pxd":976 * arr.base = baseptr * * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< * if arr.base is NULL: * return None */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init nms.gpu_nms", __pyx_clineno, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init nms.gpu_nms"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if PY_MAJOR_VERSION < 3 return; #else return __pyx_m; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static void __Pyx_RaiseArgumentTypeInvalid(const char* name, PyObject *obj, PyTypeObject *type) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); } static CYTHON_INLINE int __Pyx_ArgTypeTest(PyObject *obj, PyTypeObject *type, int none_allowed, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (none_allowed && obj == Py_None) return 1; else if (exact) { if (likely(Py_TYPE(obj) == type)) return 1; #if PY_MAJOR_VERSION == 2 else if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(PyObject_TypeCheck(obj, type))) return 1; } __Pyx_RaiseArgumentTypeInvalid(name, obj, type); return 0; } /* BufferFormatCheck */ static CYTHON_INLINE int __Pyx_IsLittleEndian(void) { unsigned int n = 1; return *(unsigned char*)(&n) != 0; } static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? 
"'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. 
*/ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = 
__Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static CYTHON_INLINE PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { 
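/* End of the format string (case 0) was reached while fields of the expected
 * dtype are still unconsumed, so the buffer layout cannot match. */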
__Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_IsLittleEndian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } static CYTHON_INLINE void __Pyx_ZeroBuffer(Py_buffer* buf) { buf->buf = NULL; buf->obj = NULL; buf->strides = __Pyx_zeros; buf->shape = __Pyx_zeros; buf->suboffsets = __Pyx_minusones; } static CYTHON_INLINE int __Pyx_GetBufferAndValidate( Py_buffer* buf, PyObject* obj, __Pyx_TypeInfo* dtype, int flags, int nd, int cast, __Pyx_BufFmt_StackElem* stack) { if (obj == Py_None || obj == NULL) { __Pyx_ZeroBuffer(buf); return 0; } buf->buf = NULL; if (__Pyx_GetBuffer(obj, buf, flags) == -1) goto fail; if (buf->ndim != nd) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", nd, buf->ndim); goto fail; } if (!cast) { __Pyx_BufFmt_Context ctx; __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned)buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "d byte%s) does not match 
size of '%s' (%" CYTHON_FORMAT_SSIZE_T "d byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, (Py_ssize_t)dtype->size, (dtype->size > 1) ? "s" : ""); goto fail; } if (buf->suboffsets == NULL) buf->suboffsets = __Pyx_minusones; return 0; fail:; __Pyx_ZeroBuffer(buf); return -1; } static CYTHON_INLINE void __Pyx_SafeReleaseBuffer(Py_buffer* info) { if (info->buf == NULL) return; if (info->suboffsets == __Pyx_minusones) info->suboffsets = NULL; __Pyx_ReleaseBuffer(info); } /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if CYTHON_COMPILING_IN_CPYTHON result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(PyObject_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* 
__Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* PyObjectCallNoArg */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { #ifdef __Pyx_CyFunction_USED if (likely(PyCFunction_Check(func) || PyObject_TypeCheck(func, __pyx_CyFunctionType))) { #else if (likely(PyCFunction_Check(func))) { #endif if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { return __Pyx_PyObject_CallMethO(func, NULL); } } return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* SliceObject */ static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop, PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice, int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) { #if CYTHON_COMPILING_IN_CPYTHON PyMappingMethods* mp; #if PY_MAJOR_VERSION < 3 PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence; if (likely(ms && ms->sq_slice)) { if (!has_cstart) { if (_py_start && (*_py_start != Py_None)) { cstart = __Pyx_PyIndex_AsSsize_t(*_py_start); if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstart = 0; } if (!has_cstop) { if (_py_stop && (*_py_stop != Py_None)) { cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop); if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; } else cstop = PY_SSIZE_T_MAX; } if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) { Py_ssize_t l = ms->sq_length(obj); if (likely(l >= 0)) { if (cstop < 0) { cstop += l; if (cstop < 0) cstop = 0; } if (cstart < 0) { cstart += l; if (cstart < 0) cstart = 0; } } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) goto bad; PyErr_Clear(); } } return ms->sq_slice(obj, cstart, cstop); } #endif mp = Py_TYPE(obj)->tp_as_mapping; if (likely(mp && mp->mp_subscript)) #endif { PyObject* result; PyObject *py_slice, *py_start, *py_stop; if (_py_slice) { py_slice = *_py_slice; } else { PyObject* owned_start = NULL; PyObject* owned_stop = NULL; if (_py_start) { py_start = *_py_start; } else { if (has_cstart) { owned_start = py_start = PyInt_FromSsize_t(cstart); if (unlikely(!py_start)) goto bad; } else py_start = Py_None; } if (_py_stop) { py_stop = *_py_stop; } else { if (has_cstop) { owned_stop = py_stop = PyInt_FromSsize_t(cstop); if (unlikely(!py_stop)) { Py_XDECREF(owned_start); goto bad; } } else py_stop = Py_None; } py_slice = PySlice_New(py_start, py_stop, Py_None); Py_XDECREF(owned_start); Py_XDECREF(owned_stop); if (unlikely(!py_slice)) goto bad; } #if CYTHON_COMPILING_IN_CPYTHON result = mp->mp_subscript(obj, py_slice); #else result = PyObject_GetItem(obj, py_slice); #endif if (!_py_slice) { Py_DECREF(py_slice); } return result; } PyErr_Format(PyExc_TypeError, "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name); bad: return NULL; } /* BufferFallbackError */ static void __Pyx_RaiseBufferFallbackError(void) { PyErr_SetString(PyExc_ValueError, "Buffer acquisition failed on assignment; and then reacquiring the old buffer failed too!"); } /* PyErrFetchRestore */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, 
*tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } #if PY_VERSION_HEX >= 0x03030000 if (cause) { #else if (cause && cause != Py_None) { #endif PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) 
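/* Instantiating the exception class passed as `cause` failed. */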
goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = PyThreadState_GET(); PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? "" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_VERSION_HEX < 0x03030000 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(1); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); #endif if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_VERSION_HEX < 0x03030000 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_VERSION_HEX < 0x03030000 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* 
code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; py_code = __pyx_find_code_object(c_line ? 
c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? c_line : py_line, py_code); } py_frame = PyFrame_New( PyThreadState_GET(), /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; py_frame->f_lineno = py_line; PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) return __pyx_pw_5numpy_7ndarray_1__getbuffer__(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if (PyObject_TypeCheck(obj, __pyx_ptype_5numpy_ndarray)) { __pyx_pw_5numpy_7ndarray_3__releasebuffer__(obj, view); return; } Py_DECREF(obj); view->obj = NULL; } #endif /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return ::std::complex< float >(x, y); } #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { return x + y*(__pyx_t_float_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { __pyx_t_float_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eqf(__pyx_t_float_complex a, __pyx_t_float_complex b) { return (a.real == b.real) && (a.imag == 
b.imag); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sumf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_difff(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prodf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quotf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_negf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zerof(__pyx_t_float_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conjf(__pyx_t_float_complex a) { __pyx_t_float_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE float __Pyx_c_absf(__pyx_t_float_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrtf(z.real*z.real + z.imag*z.imag); #else return hypotf(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_powf(__pyx_t_float_complex a, __pyx_t_float_complex b) { __pyx_t_float_complex z; float r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { float denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(a, a); case 3: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, a); case 4: z = __Pyx_c_prodf(a, a); return __Pyx_c_prodf(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_absf(a); theta = atan2f(a.imag, a.real); } lnr = logf(r); z_r = expf(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cosf(z_theta); z.imag = z_r * sinf(z_theta); return z; } #endif #endif /* None */ #if CYTHON_CCOMPLEX #ifdef __cplusplus static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return ::std::complex< double >(x, y); } #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { return x + y*(__pyx_t_double_complex)_Complex_I; } #endif #else static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { __pyx_t_double_complex z; z.real = x; z.imag = y; return z; } #endif /* None */ #if CYTHON_CCOMPLEX #else static CYTHON_INLINE int __Pyx_c_eq(__pyx_t_double_complex a, __pyx_t_double_complex b) { return (a.real == b.real) && (a.imag == b.imag); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real + b.real; z.imag = a.imag + b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff(__pyx_t_double_complex a, __pyx_t_double_complex b) { 
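/* Component-wise complex subtraction: z = (a.real - b.real) + i*(a.imag - b.imag). */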
__pyx_t_double_complex z; z.real = a.real - b.real; z.imag = a.imag - b.imag; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; z.real = a.real * b.real - a.imag * b.imag; z.imag = a.real * b.imag + a.imag * b.real; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double denom = b.real * b.real + b.imag * b.imag; z.real = (a.real * b.real + a.imag * b.imag) / denom; z.imag = (a.imag * b.real - a.real * b.imag) / denom; return z; } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = -a.real; z.imag = -a.imag; return z; } static CYTHON_INLINE int __Pyx_c_is_zero(__pyx_t_double_complex a) { return (a.real == 0) && (a.imag == 0); } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj(__pyx_t_double_complex a) { __pyx_t_double_complex z; z.real = a.real; z.imag = -a.imag; return z; } #if 1 static CYTHON_INLINE double __Pyx_c_abs(__pyx_t_double_complex z) { #if !defined(HAVE_HYPOT) || defined(_MSC_VER) return sqrt(z.real*z.real + z.imag*z.imag); #else return hypot(z.real, z.imag); #endif } static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow(__pyx_t_double_complex a, __pyx_t_double_complex b) { __pyx_t_double_complex z; double r, lnr, theta, z_r, z_theta; if (b.imag == 0 && b.real == (int)b.real) { if (b.real < 0) { double denom = a.real * a.real + a.imag * a.imag; a.real = a.real / denom; a.imag = -a.imag / denom; b.real = -b.real; } switch ((int)b.real) { case 0: z.real = 1; z.imag = 0; return z; case 1: return a; case 2: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(a, a); case 3: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, a); case 4: z = __Pyx_c_prod(a, a); return __Pyx_c_prod(z, z); } } if (a.imag == 0) { if (a.real == 0) { return a; } r = a.real; theta = 0; } else { r = __Pyx_c_abs(a); theta = atan2(a.imag, a.real); } lnr = log(r); z_r = exp(lnr * b.real - theta * b.imag); z_theta = theta * b.real + lnr * b.imag; z.real = z_r * cos(z_theta); z.imag = z_r * sin(z_theta); return z; } #endif #endif /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_enum__NPY_TYPES(enum NPY_TYPES value) { const enum NPY_TYPES neg_one = (enum NPY_TYPES) -1, const_zero = (enum NPY_TYPES) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(enum NPY_TYPES) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(enum NPY_TYPES) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(enum NPY_TYPES) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(enum NPY_TYPES), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE npy_int32 __Pyx_PyInt_As_npy_int32(PyObject *x) { const npy_int32 neg_one = (npy_int32) -1, const_zero = (npy_int32) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(npy_int32) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(npy_int32, long, PyInt_AS_LONG(x)) } 
else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (npy_int32) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, digits[0]) case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 2 * PyLong_SHIFT) { return (npy_int32) (((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 3 * PyLong_SHIFT) { return (npy_int32) (((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) >= 4 * PyLong_SHIFT) { return (npy_int32) (((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (npy_int32) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(npy_int32) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(npy_int32) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (npy_int32) 0; case -1: __PYX_VERIFY_RETURN_INT(npy_int32, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(npy_int32, digit, +digits[0]) case -2: if (8 * sizeof(npy_int32) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 2: if (8 * sizeof(npy_int32) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) { return (npy_int32) ((((((npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -3: if (8 * sizeof(npy_int32) - 1 > 2 * PyLong_SHIFT) 
{ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 3: if (8 * sizeof(npy_int32) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { return (npy_int32) ((((((((npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case -4: if (8 * sizeof(npy_int32) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) (((npy_int32)-1)*(((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; case 4: if (8 * sizeof(npy_int32) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(npy_int32, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(npy_int32) - 1 > 4 * PyLong_SHIFT) { return (npy_int32) ((((((((((npy_int32)digits[3]) << PyLong_SHIFT) | (npy_int32)digits[2]) << PyLong_SHIFT) | (npy_int32)digits[1]) << PyLong_SHIFT) | (npy_int32)digits[0]))); } } break; } #endif if (sizeof(npy_int32) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, long, PyLong_AsLong(x)) } else if (sizeof(npy_int32) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(npy_int32, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else npy_int32 val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (npy_int32) -1; } } else { npy_int32 val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (npy_int32) -1; val = __Pyx_PyInt_As_npy_int32(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to npy_int32"); return (npy_int32) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to npy_int32"); return (npy_int32) -1; } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = 
(int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { 
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { 
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 
* PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: 
PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* ModuleImport */ #ifndef __PYX_HAVE_RT_ImportModule #define __PYX_HAVE_RT_ImportModule static PyObject *__Pyx_ImportModule(const char *name) { PyObject *py_name = 0; PyObject *py_module = 0; py_name = __Pyx_PyIdentifier_FromString(name); if (!py_name) goto bad; py_module = PyImport_Import(py_name); Py_DECREF(py_name); return py_module; bad: Py_XDECREF(py_name); return 0; } #endif /* TypeImport */ #ifndef __PYX_HAVE_RT_ImportType #define __PYX_HAVE_RT_ImportType static PyTypeObject *__Pyx_ImportType(const char *module_name, const char *class_name, size_t size, int strict) { PyObject *py_module = 0; PyObject *result = 0; PyObject *py_name = 0; char warning[200]; Py_ssize_t basicsize; #ifdef Py_LIMITED_API PyObject *py_basicsize; #endif py_module = __Pyx_ImportModule(module_name); if (!py_module) goto bad; py_name = __Pyx_PyIdentifier_FromString(class_name); if (!py_name) goto bad; result = PyObject_GetAttr(py_module, py_name); Py_DECREF(py_name); py_name = 0; Py_DECREF(py_module); py_module = 0; if (!result) goto bad; if (!PyType_Check(result)) { PyErr_Format(PyExc_TypeError, "%.200s.%.200s is not a type object", module_name, class_name); goto bad; } #ifndef Py_LIMITED_API basicsize = ((PyTypeObject *)result)->tp_basicsize; #else py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); if (!py_basicsize) goto bad; basicsize = PyLong_AsSsize_t(py_basicsize); Py_DECREF(py_basicsize); py_basicsize = 0; if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) goto bad; #endif if (!strict && (size_t)basicsize > size) { PyOS_snprintf(warning, sizeof(warning), "%s.%s size changed, may indicate binary incompatibility. Expected %zd, got %zd", module_name, class_name, basicsize, size); if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; } else if ((size_t)basicsize != size) { PyErr_Format(PyExc_ValueError, "%.200s.%.200s has the wrong size, try recompiling. 
Expected %zd, got %zd", module_name, class_name, basicsize, size); goto bad; } return (PyTypeObject *)result; bad: Py_XDECREF(py_module); Py_XDECREF(result); return NULL; } #endif /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } static CYTHON_INLINE char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if CYTHON_COMPILING_IN_CPYTHON && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { #if PY_VERSION_HEX < 0x03030000 char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; #else if (__Pyx_PyUnicode_READY(o) == -1) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (PyUnicode_IS_ASCII(o)) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif #endif } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { PyNumberMethods *m; const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (PyInt_Check(x) || PyLong_Check(x)) #else if (PyLong_Check(x)) #endif return __Pyx_NewRef(x); m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = PyNumber_Int(x); } else if (m && m->nb_long) { name = "long"; res = PyNumber_Long(x); } #else if (m && m->nb_int) { name = "int"; res = PyNumber_Long(x); } #endif if (res) { #if PY_MAJOR_VERSION < 3 if (!PyInt_Check(res) && !PyLong_Check(res)) { #else if (!PyLong_Check(res)) { #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", name, name, 
Py_TYPE(res)->tp_name); Py_DECREF(res); return NULL; } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
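The generated helpers above (__Pyx_PyInt_As_int, __Pyx_PyInt_As_long, __Pyx_PyInt_As_npy_int32) all rely on the same round-trip check from __PYX__VERIFY_RETURN_INT: cast the wide value to the narrow target, cast back, and reject the conversion if the value changed or if a negative value is headed for an unsigned target. A minimal standalone sketch of that pattern, with a hypothetical name (narrow_from_wide) and an unsigned short target chosen only for illustration:

#include <stdio.h>

/* Hypothetical helper mirroring the __PYX__VERIFY_RETURN_INT pattern:
 * reject values that do not survive the cast to the narrower target. */
static int narrow_from_wide(long value, unsigned short *out)
{
    if (value < 0)
        return -2;                                  /* raise_neg_overflow analogue */
    if (value != (long)(unsigned short)value)
        return -1;                                  /* raise_overflow analogue */
    *out = (unsigned short)value;
    return 0;
}

int main(void)
{
    unsigned short s;
    printf("%d\n", narrow_from_wide(123L, &s));     /* 0: fits, s == 123 */
    printf("%d\n", narrow_from_wide(70000L, &s));   /* -1: truncation detected */
    printf("%d\n", narrow_from_wide(-1L, &s));      /* -2: negative into unsigned target */
    return 0;
}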
63ab48dc491aa1fafb08701d18f1b4739f3e0c44.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <math.h> #include <float.h> #include <helper_cuda.h> #include <cu-kd-search.cuh> #include "kd-tree-build.cuh" __device__ __host__ float cuDist(struct Point qp, struct Node point) { float dx = qp.p[0] - point.p[0], dy = qp.p[1] - point.p[1], dz = qp.p[2] - point.p[2]; return (dx * dx) + (dy * dy) + (dz * dz); } __device__ __host__ void cuInitStack(struct SPoint **stack) { (*stack)[0].index = -1; (*stack)++; } __device__ __host__ bool cuIsEmpty(struct SPoint *stack) { return cuPeek(stack).index == -1; } __device__ __host__ void cuPush(struct SPoint **stack, struct SPoint value) { *((*stack)++) = value; } __device__ __host__ struct SPoint cuPop(struct SPoint **stack) { return *(--(*stack)); } __device__ __host__ struct SPoint cuPeek(struct SPoint *stack) { return *(stack - 1); } __device__ __host__ void cuInitKStack(struct KPoint **k_stack, int n) { (*k_stack)--; for (int i = 1; i <= n; ++i) { (*k_stack)[i].dist = FLT_MAX; (*k_stack)[i].index = -1; } } __device__ __host__ void cuInsert(struct KPoint *k_stack, struct KPoint k_point, int n) { int i_child, now; struct KPoint child, child_tmp_2; for (now = 1; now * 2 <= n ; now = i_child) { i_child = now * 2; child = k_stack[i_child]; child_tmp_2 = k_stack[i_child + 1]; if (i_child <= n && child_tmp_2.dist > child.dist ) { i_child++; child = child_tmp_2; } if (i_child <= n && k_point.dist < child.dist) { k_stack[now] = child; } else { break; } } k_stack[now] = k_point; } __device__ __host__ struct KPoint cuLook(struct KPoint *k_stack) { return k_stack[1]; } __device__ __host__ void cuUpDim(int &dim) { dim = (dim + 1) % 3; } __device__ __host__ void cuChildren(struct Point qp, struct Node current, float dx, int &target, int &other) { if (dx > 0) { other = current.right; target = current.left; } else { other = current.left; target = current.right; } } __device__ __host__ void cuKNN(struct Point qp, struct Node *tree, int n, int k, struct SPoint *stack, struct KPoint *k_stack) { int dim = 2, target; float current_dist; struct Node current_point; struct SPoint current; struct KPoint worst_best; current.index = n / 2; cuInitStack(&stack); cuInitKStack(&k_stack, k); worst_best = cuLook(k_stack); while (!cuIsEmpty(stack) || current.index != -1) { if (current.index == -1 && !cuIsEmpty(stack)) { current = cuPop(&stack); dim = current.dim; current.index = (current.dx * current.dx < worst_best.dist) ? 
current.other : -1; } else { current_point = tree[current.index]; current_dist = cuDist(qp, current_point); if (worst_best.dist > current_dist) { worst_best.dist = current_dist; worst_best.index = current.index; cuInsert(k_stack, worst_best, k); worst_best = cuLook(k_stack); } cuUpDim(dim); current.dim = dim; current.dx = current_point.p[dim] - qp.p[dim]; cuChildren(qp, current_point, current.dx, target, current.other); cuPush(&stack, current); current.index = target; } } } __device__ __host__ int fastIntegerLog2(int x) { int y = 0; while (x >>= 1) { y++; } return y; } __device__ void cuCalculateBlockOffsetAndNoOfQueries(int n, int &n_per_block, int &block_offset) { int rest = n % gridDim.x; n_per_block = n / gridDim.x; block_offset = n_per_block * blockIdx.x; if (rest >= gridDim.x - blockIdx.x) { block_offset += rest - (gridDim.x - blockIdx.x); n_per_block++; } } __device__ __host__ int getSStackSize(int n) { return fastIntegerLog2(n) + 2; } size_t getSStackSizeInBytes(int n, int thread_num, int block_num) { return block_num * thread_num * ((getSStackSize(n) * sizeof(SPoint))); } size_t getNeededBytesInSearch(int n_qp, int k, int n, int thread_num, int block_num) { return n_qp * (k * sizeof(int) + sizeof(Point)) + (n_qp * k * sizeof(KPoint)) + (getSStackSizeInBytes(n, thread_num, block_num)); } void populateTrivialResult(int n_qp, int k, int n_tree, int *result) { #pragma omp parallel for for (int i = 0; i < n_qp; ++i) { for (int j = 0; j < k; ++j) { result[i * k + j] = j % n_tree; } } } template<int stack_size> __global__ void dQueryAll(struct Point *query_points, struct Node *tree, int n_qp, int n_tree, int k, struct KPoint *k_stack_ptr) { SPoint stack[stack_size]; int tid = threadIdx.x, block_step, block_offset; cuCalculateBlockOffsetAndNoOfQueries(n_qp, block_step, block_offset); query_points += block_offset; k_stack_ptr += block_offset * k; while (tid < block_step) { cuKNN(query_points[tid], tree, n_tree, k, stack, k_stack_ptr + (tid * k)); tid += blockDim.x; } } void getThreadAndBlockCountForQueryAll(int n, int &blocks, int &threads) { threads = THREADS_PER_BLOCK_SEARCH; blocks = n / threads; blocks = min(MAX_BLOCK_DIM_SIZE, blocks); blocks = max(1, blocks); // printf("blocks = %d, threads = %d, n= %d\n", blocks, threads, n); } int getQueriesInStep(int n_qp, int k, int n) { int numBlocks, numThreads; size_t needed_bytes_total, free_bytes; free_bytes = getFreeBytesOnGpu(); getThreadAndBlockCountForQueryAll(n_qp, numThreads, numBlocks); needed_bytes_total = getNeededBytesInSearch(n_qp, k, n, numThreads, numBlocks); if (free_bytes > needed_bytes_total) return n_qp; if (n_qp < 50) return -1; return getQueriesInStep((n_qp / 2), k, n); } int nextPowerOf2___(int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } void templateQueryAll(struct Point *d_query_points, struct Node *d_tree, int queries_in_step, int n_tree, int k, int stack_size, int numBlocks, int numThreads, struct KPoint *d_k_stack) { if (stack_size <= 20 ) { hipLaunchKernelGGL(( dQueryAll<20>) , dim3(numBlocks), dim3(numThreads), 0, 0, d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else if (stack_size <= 25) { hipLaunchKernelGGL(( dQueryAll<25>) , dim3(numBlocks), dim3(numThreads), 0, 0, d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else if (stack_size <= 30) { hipLaunchKernelGGL(( dQueryAll<30>) , dim3(numBlocks), dim3(numThreads), 0, 0, d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else { hipLaunchKernelGGL(( dQueryAll<35>) , 
dim3(numBlocks), dim3(numThreads), 0, 0, d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } } void cuQueryAll(struct Point *h_query_points, struct Node *h_tree, int n_qp, int n_tree, int k, int *h_result) { int numBlocks, numThreads, queries_in_step, queries_done, stack_size; struct Node *d_tree; struct KPoint *d_k_stack, *h_k_stack; struct SPoint *d_stack; struct Point *d_query_points; if (k >= n_tree) { populateTrivialResult(n_qp, k, n_tree, h_result); return; } checkCudaErrors( hipDeviceSetCacheConfig(hipFuncCachePreferL1)); checkCudaErrors(hipMalloc(&d_tree, n_tree * sizeof(Node))); checkCudaErrors(hipMemcpy(d_tree, h_tree, n_tree * sizeof(Node), hipMemcpyHostToDevice)); queries_in_step = getQueriesInStep(n_qp, k, n_tree); if (queries_in_step <= 0) { printf("There is not enough memory to perform this queries on cuda.\n"); return; } queries_done = 0; stack_size = getSStackSize(n_tree); queries_in_step = nextPowerOf2___(++queries_in_step) >> 1; getThreadAndBlockCountForQueryAll(queries_in_step, numBlocks, numThreads); h_k_stack = (KPoint *) malloc(queries_in_step * k * sizeof(KPoint)); checkCudaErrors(hipMalloc(&d_k_stack, queries_in_step * k * sizeof(KPoint))); checkCudaErrors(hipMalloc(&d_stack, numThreads * numBlocks * stack_size * sizeof(SPoint))); checkCudaErrors(hipMalloc(&d_query_points, queries_in_step * sizeof(Point))); while (queries_done < n_qp) { if (queries_done + queries_in_step > n_qp) { queries_in_step = n_qp - queries_done; } checkCudaErrors(hipMemcpy(d_query_points, h_query_points, queries_in_step * sizeof(Point), hipMemcpyHostToDevice)); templateQueryAll(d_query_points, d_tree, queries_in_step, n_tree, k, stack_size, numBlocks, numThreads, d_k_stack); checkCudaErrors(hipMemcpy(h_k_stack, d_k_stack, queries_in_step * k * sizeof(KPoint), hipMemcpyDeviceToHost)); # pragma omp parallel for for (int i = 0; i < queries_in_step ; ++i) { for (int j = 0; j < k; ++j) { h_result[i * k + j] = h_k_stack[i * k + j].index; } } h_query_points += queries_in_step; h_result += (queries_in_step * k); queries_done += queries_in_step; } free(h_k_stack); checkCudaErrors(hipFree(d_query_points)); checkCudaErrors(hipFree(d_k_stack)); checkCudaErrors(hipFree(d_tree)); }
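The search above is an iterative k-nearest-neighbour walk over the kd-tree: cuInitKStack/cuInsert/cuLook maintain a 1-indexed, bounded max-heap of the k best distances, with the current worst candidate kept at the root so each visited node can be tested against it in O(1). The following standalone sketch uses an illustrative KBest struct that only mirrors the .dist/.index fields used above (the real KPoint is declared in cu-kd-search.cuh, which is not shown) and demonstrates the replace-root-and-sift-down behaviour on the host:

#include <cstdio>
#include <cfloat>

struct KBest { float dist; int index; };   /* illustrative stand-in for KPoint */

/* Replace the root (current worst of the k best) and sift it down. */
__host__ __device__ void kbestInsert(KBest *heap, KBest p, int k)
{
    int now = 1;
    while (2 * now <= k) {
        int child = 2 * now;
        if (child + 1 <= k && heap[child + 1].dist > heap[child].dist) ++child;
        if (p.dist >= heap[child].dist) break;  /* p is already worse than both children */
        heap[now] = heap[child];                /* promote the larger child */
        now = child;
    }
    heap[now] = p;
}

int main()
{
    const int k = 3;
    KBest heap[k + 1];                          /* slot 0 unused: 1-indexed heap */
    for (int i = 1; i <= k; ++i) { heap[i].dist = FLT_MAX; heap[i].index = -1; }

    float d[5] = { 9.f, 4.f, 7.f, 1.f, 5.f };
    for (int i = 0; i < 5; ++i) {
        if (d[i] < heap[1].dist) {              /* only insert if better than the current worst */
            KBest p; p.dist = d[i]; p.index = i;
            kbestInsert(heap, p, k);
        }
    }
    printf("worst kept distance: %.1f (expect 5.0)\n", heap[1].dist);
    return 0;
}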
63ab48dc491aa1fafb08701d18f1b4739f3e0c44.cu
#include <stdlib.h> #include <math.h> #include <float.h> #include <helper_cuda.h> #include <cu-kd-search.cuh> #include "kd-tree-build.cuh" __device__ __host__ float cuDist(struct Point qp, struct Node point) { float dx = qp.p[0] - point.p[0], dy = qp.p[1] - point.p[1], dz = qp.p[2] - point.p[2]; return (dx * dx) + (dy * dy) + (dz * dz); } __device__ __host__ void cuInitStack(struct SPoint **stack) { (*stack)[0].index = -1; (*stack)++; } __device__ __host__ bool cuIsEmpty(struct SPoint *stack) { return cuPeek(stack).index == -1; } __device__ __host__ void cuPush(struct SPoint **stack, struct SPoint value) { *((*stack)++) = value; } __device__ __host__ struct SPoint cuPop(struct SPoint **stack) { return *(--(*stack)); } __device__ __host__ struct SPoint cuPeek(struct SPoint *stack) { return *(stack - 1); } __device__ __host__ void cuInitKStack(struct KPoint **k_stack, int n) { (*k_stack)--; for (int i = 1; i <= n; ++i) { (*k_stack)[i].dist = FLT_MAX; (*k_stack)[i].index = -1; } } __device__ __host__ void cuInsert(struct KPoint *k_stack, struct KPoint k_point, int n) { int i_child, now; struct KPoint child, child_tmp_2; for (now = 1; now * 2 <= n ; now = i_child) { i_child = now * 2; child = k_stack[i_child]; child_tmp_2 = k_stack[i_child + 1]; if (i_child <= n && child_tmp_2.dist > child.dist ) { i_child++; child = child_tmp_2; } if (i_child <= n && k_point.dist < child.dist) { k_stack[now] = child; } else { break; } } k_stack[now] = k_point; } __device__ __host__ struct KPoint cuLook(struct KPoint *k_stack) { return k_stack[1]; } __device__ __host__ void cuUpDim(int &dim) { dim = (dim + 1) % 3; } __device__ __host__ void cuChildren(struct Point qp, struct Node current, float dx, int &target, int &other) { if (dx > 0) { other = current.right; target = current.left; } else { other = current.left; target = current.right; } } __device__ __host__ void cuKNN(struct Point qp, struct Node *tree, int n, int k, struct SPoint *stack, struct KPoint *k_stack) { int dim = 2, target; float current_dist; struct Node current_point; struct SPoint current; struct KPoint worst_best; current.index = n / 2; cuInitStack(&stack); cuInitKStack(&k_stack, k); worst_best = cuLook(k_stack); while (!cuIsEmpty(stack) || current.index != -1) { if (current.index == -1 && !cuIsEmpty(stack)) { current = cuPop(&stack); dim = current.dim; current.index = (current.dx * current.dx < worst_best.dist) ? 
current.other : -1; } else { current_point = tree[current.index]; current_dist = cuDist(qp, current_point); if (worst_best.dist > current_dist) { worst_best.dist = current_dist; worst_best.index = current.index; cuInsert(k_stack, worst_best, k); worst_best = cuLook(k_stack); } cuUpDim(dim); current.dim = dim; current.dx = current_point.p[dim] - qp.p[dim]; cuChildren(qp, current_point, current.dx, target, current.other); cuPush(&stack, current); current.index = target; } } } __device__ __host__ int fastIntegerLog2(int x) { int y = 0; while (x >>= 1) { y++; } return y; } __device__ void cuCalculateBlockOffsetAndNoOfQueries(int n, int &n_per_block, int &block_offset) { int rest = n % gridDim.x; n_per_block = n / gridDim.x; block_offset = n_per_block * blockIdx.x; if (rest >= gridDim.x - blockIdx.x) { block_offset += rest - (gridDim.x - blockIdx.x); n_per_block++; } } __device__ __host__ int getSStackSize(int n) { return fastIntegerLog2(n) + 2; } size_t getSStackSizeInBytes(int n, int thread_num, int block_num) { return block_num * thread_num * ((getSStackSize(n) * sizeof(SPoint))); } size_t getNeededBytesInSearch(int n_qp, int k, int n, int thread_num, int block_num) { return n_qp * (k * sizeof(int) + sizeof(Point)) + (n_qp * k * sizeof(KPoint)) + (getSStackSizeInBytes(n, thread_num, block_num)); } void populateTrivialResult(int n_qp, int k, int n_tree, int *result) { #pragma omp parallel for for (int i = 0; i < n_qp; ++i) { for (int j = 0; j < k; ++j) { result[i * k + j] = j % n_tree; } } } template<int stack_size> __global__ void dQueryAll(struct Point *query_points, struct Node *tree, int n_qp, int n_tree, int k, struct KPoint *k_stack_ptr) { SPoint stack[stack_size]; int tid = threadIdx.x, block_step, block_offset; cuCalculateBlockOffsetAndNoOfQueries(n_qp, block_step, block_offset); query_points += block_offset; k_stack_ptr += block_offset * k; while (tid < block_step) { cuKNN(query_points[tid], tree, n_tree, k, stack, k_stack_ptr + (tid * k)); tid += blockDim.x; } } void getThreadAndBlockCountForQueryAll(int n, int &blocks, int &threads) { threads = THREADS_PER_BLOCK_SEARCH; blocks = n / threads; blocks = min(MAX_BLOCK_DIM_SIZE, blocks); blocks = max(1, blocks); // printf("blocks = %d, threads = %d, n= %d\n", blocks, threads, n); } int getQueriesInStep(int n_qp, int k, int n) { int numBlocks, numThreads; size_t needed_bytes_total, free_bytes; free_bytes = getFreeBytesOnGpu(); getThreadAndBlockCountForQueryAll(n_qp, numThreads, numBlocks); needed_bytes_total = getNeededBytesInSearch(n_qp, k, n, numThreads, numBlocks); if (free_bytes > needed_bytes_total) return n_qp; if (n_qp < 50) return -1; return getQueriesInStep((n_qp / 2), k, n); } int nextPowerOf2___(int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } void templateQueryAll(struct Point *d_query_points, struct Node *d_tree, int queries_in_step, int n_tree, int k, int stack_size, int numBlocks, int numThreads, struct KPoint *d_k_stack) { if (stack_size <= 20 ) { dQueryAll<20> <<< numBlocks, numThreads>>>(d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else if (stack_size <= 25) { dQueryAll<25> <<< numBlocks, numThreads>>>(d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else if (stack_size <= 30) { dQueryAll<30> <<< numBlocks, numThreads>>>(d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } else { dQueryAll<35> <<< numBlocks, numThreads>>>(d_query_points, d_tree, queries_in_step, n_tree, k, d_k_stack); } } void cuQueryAll(struct Point 
*h_query_points, struct Node *h_tree, int n_qp, int n_tree, int k, int *h_result) { int numBlocks, numThreads, queries_in_step, queries_done, stack_size; struct Node *d_tree; struct KPoint *d_k_stack, *h_k_stack; struct SPoint *d_stack; struct Point *d_query_points; if (k >= n_tree) { populateTrivialResult(n_qp, k, n_tree, h_result); return; } checkCudaErrors( cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); checkCudaErrors(cudaMalloc(&d_tree, n_tree * sizeof(Node))); checkCudaErrors(cudaMemcpy(d_tree, h_tree, n_tree * sizeof(Node), cudaMemcpyHostToDevice)); queries_in_step = getQueriesInStep(n_qp, k, n_tree); if (queries_in_step <= 0) { printf("There is not enough memory to perform this queries on cuda.\n"); return; } queries_done = 0; stack_size = getSStackSize(n_tree); queries_in_step = nextPowerOf2___(++queries_in_step) >> 1; getThreadAndBlockCountForQueryAll(queries_in_step, numBlocks, numThreads); h_k_stack = (KPoint *) malloc(queries_in_step * k * sizeof(KPoint)); checkCudaErrors(cudaMalloc(&d_k_stack, queries_in_step * k * sizeof(KPoint))); checkCudaErrors(cudaMalloc(&d_stack, numThreads * numBlocks * stack_size * sizeof(SPoint))); checkCudaErrors(cudaMalloc(&d_query_points, queries_in_step * sizeof(Point))); while (queries_done < n_qp) { if (queries_done + queries_in_step > n_qp) { queries_in_step = n_qp - queries_done; } checkCudaErrors(cudaMemcpy(d_query_points, h_query_points, queries_in_step * sizeof(Point), cudaMemcpyHostToDevice)); templateQueryAll(d_query_points, d_tree, queries_in_step, n_tree, k, stack_size, numBlocks, numThreads, d_k_stack); checkCudaErrors(cudaMemcpy(h_k_stack, d_k_stack, queries_in_step * k * sizeof(KPoint), cudaMemcpyDeviceToHost)); # pragma omp parallel for for (int i = 0; i < queries_in_step ; ++i) { for (int j = 0; j < k; ++j) { h_result[i * k + j] = h_k_stack[i * k + j].index; } } h_query_points += queries_in_step; h_result += (queries_in_step * k); queries_done += queries_in_step; } free(h_k_stack); checkCudaErrors(cudaFree(d_query_points)); checkCudaErrors(cudaFree(d_k_stack)); checkCudaErrors(cudaFree(d_tree)); }
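Apart from the hip*/cuda* runtime renames, the only structural difference between the two versions of this file is the launch syntax in templateQueryAll: the .cu file uses the triple-chevron form dQueryAll<20><<<numBlocks, numThreads>>>(...), while the hipified file wraps the same call in hipLaunchKernelGGL with explicit dim3 arguments plus shared-memory size and stream. A minimal sketch of that correspondence, using an illustrative kernel name (fillValue) rather than anything from the files:

#include <cstdio>

template<int N>
__global__ void fillValue(int *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        out[i] = N;
}

int main()
{
    int *d_out, h_out[32];
    cudaMalloc(&d_out, 32 * sizeof(int));

    /* CUDA triple-chevron launch, as in the .cu version: */
    fillValue<32><<<1, 32>>>(d_out);

    /* hipify rewrites this launch as:
     *   hipLaunchKernelGGL((fillValue<32>), dim3(1), dim3(32), 0, 0, d_out);
     * where the extra 0, 0 are the dynamic shared-memory size and the stream. */

    cudaMemcpy(h_out, d_out, 32 * sizeof(int), cudaMemcpyDeviceToHost);
    printf("h_out[0] = %d (expect 32)\n", h_out[0]);
    cudaFree(d_out);
    return 0;
}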
a28687bbf17326b09c4f2c2424b3f643ae66e5b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> //////////////////////////float3//////////////////////////////// inline __device__ float3 operator+(float3 a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator-(float3 a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); } inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ void operator+=(float3 &a, float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; } inline __device__ void operator-=(float3 &a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __device__ float3 operator/(float3 a, float3 b) { return make_float3(a.x/b.x, a.y/b.y, a.z/b.z); } inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x*b, a.y*b, a.z*b); } inline __device__ float3 operator*(float a, float3 b) { return make_float3(a*b.x, a*b.y, a*b.z); } inline __device__ float3 operator*(float3 a, float3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); } inline __device__ float3 operator*(float3 a, int3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); } ///////////////////////////int3///////////////////////////////// inline __device__ int3 operator+(int3 a, int3 b) { return make_int3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ int3 operator-(int3 a, int3 b) { return make_int3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ int3 operator+(int3 a, int b) { return make_int3(a.x + b, a.y + b, a.z + b); } inline __device__ int3 operator-(int3 a, int b) { return make_int3(a.x - b, a.y - b, a.z - b); } inline __device__ int3 operator+(int a, int3 b) { return make_int3(a + b.x, a + b.y, a + b.z); } inline __device__ int3 operator-(int a, int3 b) { return make_int3(a - b.x, a - b.y, a - b.z); } //////////////////////////////////////////////////////////////// inline __device__ int3 clamp(int3 x, int a, int3 b) { return make_int3(max(a, min(x.x, b.x)), max(a, min(x.y, b.y)), max(a, min(x.z, b.z))); } inline __device__ int3 clamp(int3 x, int3 a, int b) { return make_int3(max(a.x, min(x.x, b)), max(a.y, min(x.y, b)), max(a.z, min(x.z, b))); } inline __device__ int3 floorf(float3 v) { return make_int3(floorf(v.x), floorf(v.y), floorf(v.z)); } inline __device__ float3 round(float3 v) { return make_float3(round(v.x), round(v.y), round(v.z)); } inline __device__ int dot(int3 a, int3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; } inline __device__ float dot(float3 a, float3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; } inline __device__ int mod(int a, int b) { int k = a % b; return (k < 0) ? 
(k + b) : k; } inline __device__ int3 mod(int3 a, int3 b) { return make_int3(mod(a.x,b.x), mod(a.y,b.y), mod(a.z,b.z)); } //////////////////////////////////////////////////////////////// struct BinStep { int3 positive, negative; }; inline __device__ int3 getLocalBinIndex(float3 coordinate, float3 binLength, int3 binDim) { return clamp(floorf(coordinate/binLength), 0, binDim - 1); } inline __device__ int getBinIndex(int3 localBinIndex, int3 binDim) { return dot(localBinIndex, make_int3(1, binDim.x, binDim.x*binDim.y)); } inline __device__ void stepLimit(int &positive, int &negative, int binDim) { if (positive - negative > binDim - 1) { if (positive > -negative) { positive = negative + binDim - 1; } else { negative = positive - binDim + 1; } } } inline __device__ void lennardJones(float3 &ljF, float &ljU, float3 R, float r2, float eps, float sig) { float sr = sig*sig/r2; sr = sr*sr*sr; ljF += eps/r2*(2.0f*sr*sr - sr)*R; ljU += eps*(sr*sr - sr); } __device__ BinStep getBinStep(float3 coordinate, float3 binLength, int3 binDim, int3 localBinIndex, float cutoff) { struct BinStep binStep; binStep.positive = clamp(floorf((coordinate + cutoff)/binLength) - localBinIndex, 0, binDim - 1); binStep.negative = clamp(floorf((coordinate - cutoff)/binLength) - localBinIndex, 1 - binDim, 0); stepLimit(binStep.positive.x, binStep.negative.x, binDim.x); stepLimit(binStep.positive.y, binStep.negative.y, binDim.y); stepLimit(binStep.positive.z, binStep.negative.z, binDim.z); return binStep; } //////////////////////////////////////////////////////////////// extern "C" __global__ void fillBins(float3 *coordinate, int *binIndex, unsigned int *binCount, float3 binLength, int3 binDim, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { int idB = getBinIndex(getLocalBinIndex(coordinate[idg], binLength, binDim), binDim); binIndex[idg] = idB; atomicInc(&binCount[idB], arraySize); } } extern "C" __global__ void countingSort(int *binIndex, unsigned int *prefixSum, float3 *coordinate, float3 *velocity, float3 *coordinateSorted, float3 *velocitySorted, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { unsigned int idgSorted = atomicDec(&prefixSum[binIndex[idg]], arraySize) - 1u; coordinateSorted[idgSorted] = coordinate[idg]; velocitySorted[idgSorted] = velocity[idg]; } } extern "C" __global__ void ljForce(float3 *coordinateSorted, float3 *force, float *potentialEnergy, unsigned int *binCount, unsigned int *prefixSum, float3 boxSize, float3 binLength, int3 binDim, float cutoff, float eps, float sig, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 R; float r2; int binIndexNeighbour; unsigned int ionCount, offset; float3 coordinate = coordinateSorted[idg]; int3 localBinIndex = getLocalBinIndex(coordinate, binLength, binDim); struct BinStep binStep = getBinStep(coordinate, binLength, binDim, localBinIndex, cutoff); float3 ljF = make_float3(0.0f,0.0f,0.0f); float cutoff2 = cutoff*cutoff; float ljU = 0.0f; for (int dz = binStep.negative.z; dz <= binStep.positive.z; ++dz) { for (int dy = binStep.negative.y; dy <= binStep.positive.y; ++dy) { for (int dx = binStep.negative.x; dx <= binStep.positive.x; ++dx) { binIndexNeighbour = getBinIndex(mod(localBinIndex + make_int3(dx,dy,dz), binDim), binDim); ionCount = binCount[binIndexNeighbour]; if (ionCount == 0u) { continue; } offset = prefixSum[binIndexNeighbour]; for (unsigned int i = offset; i < offset + ionCount; 
++i) { if (i == idg) { continue; } R = coordinate - coordinateSorted[i]; r2 = dot(R, R); if (r2 < cutoff2) { lennardJones(ljF, ljU, R, r2, eps, sig); continue; } R -= boxSize*round(R/boxSize); r2 = dot(R, R); if (r2 < cutoff2) { lennardJones(ljF, ljU, R, r2, eps, sig); } } } } } force[idg] = 24.0f*ljF; potentialEnergy[idg] = 2.0f*ljU; } } extern "C" __global__ void verletPre(float3 *coordinate, float3 *velocity, float3 *coordinateSorted, float3 *velocitySorted, float3 *force, float3 boxSize, float mass, float dt, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 coord, vel; vel = velocitySorted[idg]; coord = coordinateSorted[idg]; vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5 coord += dt*vel; velocity[idg] = vel; coordinate[idg] = coord - boxSize*floorf(coord/boxSize); } } extern "C" __global__ void verletPos(float3 *velocitySorted, float3 *force, float *kineticEnergy, float mass, float dt, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 vel = velocitySorted[idg]; vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5 kineticEnergy[idg] = 51.8213479f*mass*dot(vel, vel); velocitySorted[idg] = vel; } }
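ljForce above combines a periodic cell list (fillBins/countingSort plus mod() on the neighbour bin index) with the minimum-image convention R -= boxSize*round(R/boxSize), so that a pair separated across a periodic boundary is measured through the nearest image rather than across the whole box. A small host-side sketch of that wrapping in one dimension, with made-up numbers:

#include <cstdio>
#include <cmath>

/* Illustrative helper (not from the file): wrap a displacement to within
 * half a box length, as ljForce does per component via round(R/boxSize). */
float minimumImage1D(float dx, float box)
{
    return dx - box * roundf(dx / box);
}

int main()
{
    float box = 10.0f;
    float dx  = 9.5f - 0.5f;   /* two particles near opposite walls */
    /* Prints: naive dx = 9.0, minimum-image dx = -1.0
     * (the nearest periodic images are only 1.0 apart). */
    printf("naive dx = %.1f, minimum-image dx = %.1f\n", dx, minimumImage1D(dx, box));
    return 0;
}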
a28687bbf17326b09c4f2c2424b3f643ae66e5b4.cu
#include <stdio.h> //////////////////////////float3//////////////////////////////// inline __device__ float3 operator+(float3 a, float b) { return make_float3(a.x + b, a.y + b, a.z + b); } inline __device__ float3 operator-(float3 a, float b) { return make_float3(a.x - b, a.y - b, a.z - b); } inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ void operator+=(float3 &a, float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; } inline __device__ void operator-=(float3 &a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; } inline __device__ float3 operator/(float3 a, float3 b) { return make_float3(a.x/b.x, a.y/b.y, a.z/b.z); } inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x*b, a.y*b, a.z*b); } inline __device__ float3 operator*(float a, float3 b) { return make_float3(a*b.x, a*b.y, a*b.z); } inline __device__ float3 operator*(float3 a, float3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); } inline __device__ float3 operator*(float3 a, int3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); } ///////////////////////////int3///////////////////////////////// inline __device__ int3 operator+(int3 a, int3 b) { return make_int3(a.x + b.x, a.y + b.y, a.z + b.z); } inline __device__ int3 operator-(int3 a, int3 b) { return make_int3(a.x - b.x, a.y - b.y, a.z - b.z); } inline __device__ int3 operator+(int3 a, int b) { return make_int3(a.x + b, a.y + b, a.z + b); } inline __device__ int3 operator-(int3 a, int b) { return make_int3(a.x - b, a.y - b, a.z - b); } inline __device__ int3 operator+(int a, int3 b) { return make_int3(a + b.x, a + b.y, a + b.z); } inline __device__ int3 operator-(int a, int3 b) { return make_int3(a - b.x, a - b.y, a - b.z); } //////////////////////////////////////////////////////////////// inline __device__ int3 clamp(int3 x, int a, int3 b) { return make_int3(max(a, min(x.x, b.x)), max(a, min(x.y, b.y)), max(a, min(x.z, b.z))); } inline __device__ int3 clamp(int3 x, int3 a, int b) { return make_int3(max(a.x, min(x.x, b)), max(a.y, min(x.y, b)), max(a.z, min(x.z, b))); } inline __device__ int3 floorf(float3 v) { return make_int3(floorf(v.x), floorf(v.y), floorf(v.z)); } inline __device__ float3 round(float3 v) { return make_float3(round(v.x), round(v.y), round(v.z)); } inline __device__ int dot(int3 a, int3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; } inline __device__ float dot(float3 a, float3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; } inline __device__ int mod(int a, int b) { int k = a % b; return (k < 0) ? 
(k + b) : k; } inline __device__ int3 mod(int3 a, int3 b) { return make_int3(mod(a.x,b.x), mod(a.y,b.y), mod(a.z,b.z)); } //////////////////////////////////////////////////////////////// struct BinStep { int3 positive, negative; }; inline __device__ int3 getLocalBinIndex(float3 coordinate, float3 binLength, int3 binDim) { return clamp(floorf(coordinate/binLength), 0, binDim - 1); } inline __device__ int getBinIndex(int3 localBinIndex, int3 binDim) { return dot(localBinIndex, make_int3(1, binDim.x, binDim.x*binDim.y)); } inline __device__ void stepLimit(int &positive, int &negative, int binDim) { if (positive - negative > binDim - 1) { if (positive > -negative) { positive = negative + binDim - 1; } else { negative = positive - binDim + 1; } } } inline __device__ void lennardJones(float3 &ljF, float &ljU, float3 R, float r2, float eps, float sig) { float sr = sig*sig/r2; sr = sr*sr*sr; ljF += eps/r2*(2.0f*sr*sr - sr)*R; ljU += eps*(sr*sr - sr); } __device__ BinStep getBinStep(float3 coordinate, float3 binLength, int3 binDim, int3 localBinIndex, float cutoff) { struct BinStep binStep; binStep.positive = clamp(floorf((coordinate + cutoff)/binLength) - localBinIndex, 0, binDim - 1); binStep.negative = clamp(floorf((coordinate - cutoff)/binLength) - localBinIndex, 1 - binDim, 0); stepLimit(binStep.positive.x, binStep.negative.x, binDim.x); stepLimit(binStep.positive.y, binStep.negative.y, binDim.y); stepLimit(binStep.positive.z, binStep.negative.z, binDim.z); return binStep; } //////////////////////////////////////////////////////////////// extern "C" __global__ void fillBins(float3 *coordinate, int *binIndex, unsigned int *binCount, float3 binLength, int3 binDim, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { int idB = getBinIndex(getLocalBinIndex(coordinate[idg], binLength, binDim), binDim); binIndex[idg] = idB; atomicInc(&binCount[idB], arraySize); } } extern "C" __global__ void countingSort(int *binIndex, unsigned int *prefixSum, float3 *coordinate, float3 *velocity, float3 *coordinateSorted, float3 *velocitySorted, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { unsigned int idgSorted = atomicDec(&prefixSum[binIndex[idg]], arraySize) - 1u; coordinateSorted[idgSorted] = coordinate[idg]; velocitySorted[idgSorted] = velocity[idg]; } } extern "C" __global__ void ljForce(float3 *coordinateSorted, float3 *force, float *potentialEnergy, unsigned int *binCount, unsigned int *prefixSum, float3 boxSize, float3 binLength, int3 binDim, float cutoff, float eps, float sig, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 R; float r2; int binIndexNeighbour; unsigned int ionCount, offset; float3 coordinate = coordinateSorted[idg]; int3 localBinIndex = getLocalBinIndex(coordinate, binLength, binDim); struct BinStep binStep = getBinStep(coordinate, binLength, binDim, localBinIndex, cutoff); float3 ljF = make_float3(0.0f,0.0f,0.0f); float cutoff2 = cutoff*cutoff; float ljU = 0.0f; for (int dz = binStep.negative.z; dz <= binStep.positive.z; ++dz) { for (int dy = binStep.negative.y; dy <= binStep.positive.y; ++dy) { for (int dx = binStep.negative.x; dx <= binStep.positive.x; ++dx) { binIndexNeighbour = getBinIndex(mod(localBinIndex + make_int3(dx,dy,dz), binDim), binDim); ionCount = binCount[binIndexNeighbour]; if (ionCount == 0u) { continue; } offset = prefixSum[binIndexNeighbour]; for (unsigned int i = offset; i < offset + ionCount; 
++i) { if (i == idg) { continue; } R = coordinate - coordinateSorted[i]; r2 = dot(R, R); if (r2 < cutoff2) { lennardJones(ljF, ljU, R, r2, eps, sig); continue; } R -= boxSize*round(R/boxSize); r2 = dot(R, R); if (r2 < cutoff2) { lennardJones(ljF, ljU, R, r2, eps, sig); } } } } } force[idg] = 24.0f*ljF; potentialEnergy[idg] = 2.0f*ljU; } } extern "C" __global__ void verletPre(float3 *coordinate, float3 *velocity, float3 *coordinateSorted, float3 *velocitySorted, float3 *force, float3 boxSize, float mass, float dt, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 coord, vel; vel = velocitySorted[idg]; coord = coordinateSorted[idg]; vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5 coord += dt*vel; velocity[idg] = vel; coordinate[idg] = coord - boxSize*floorf(coord/boxSize); } } extern "C" __global__ void verletPos(float3 *velocitySorted, float3 *force, float *kineticEnergy, float mass, float dt, unsigned int arraySize) { unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x; if (idg < arraySize) { float3 vel = velocitySorted[idg]; vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5 kineticEnergy[idg] = 51.8213479f*mass*dot(vel, vel); velocitySorted[idg] = vel; } }
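A host-side driver for the pipeline above is not included in the file pair, so the sketch below is an assumption about how the kernels are meant to be chained: fillBins histograms particles into bins, a prefix sum over binCount produces prefixSum, countingSort scatters particles into bin order (its atomicDec also turns the inclusive scan into per-bin start offsets), and ljForce then walks the neighbouring bins. Only the kernel names and argument lists come from the source; the block size, buffer names and the use of Thrust for the scan are illustrative, and the function is assumed to live in the same translation unit as the kernels.

// Hypothetical host-side driver for one force evaluation (not part of the original files).
#include <thrust/scan.h>
#include <thrust/execution_policy.h>

void ljStep(float3 *d_coord, float3 *d_vel,
            float3 *d_coordSorted, float3 *d_velSorted,
            float3 *d_force, float *d_potential,
            int *d_binIndex, unsigned int *d_binCount, unsigned int *d_prefixSum,
            float3 boxSize, float3 binLength, int3 binDim,
            float cutoff, float eps, float sig,
            unsigned int n, unsigned int numBins)
{
    const unsigned int block = 256;
    const unsigned int grid  = (n + block - 1) / block;

    // 1. Histogram particles into bins.
    cudaMemset(d_binCount, 0, numBins * sizeof(unsigned int));
    fillBins<<<grid, block>>>(d_coord, d_binIndex, d_binCount, binLength, binDim, n);

    // 2. Inclusive scan of the bin counts; countingSort's atomicDec then leaves
    //    prefixSum[b] equal to the start offset of bin b as a side effect.
    thrust::inclusive_scan(thrust::device, d_binCount, d_binCount + numBins, d_prefixSum);

    // 3. Scatter particles into bin-sorted order.
    countingSort<<<grid, block>>>(d_binIndex, d_prefixSum, d_coord, d_vel,
                                  d_coordSorted, d_velSorted, n);

    // 4. Lennard-Jones forces over the neighbouring bins.
    ljForce<<<grid, block>>>(d_coordSorted, d_force, d_potential,
                             d_binCount, d_prefixSum, boxSize, binLength, binDim,
                             cutoff, eps, sig, n);
}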
e69b666110d25ac93023e77f3357351f501b68ec.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <string.h> #include <experimental/filesystem> #include <list> #include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include <armadillo> #include <chrono> #include <time.h> #include <omp.h> #include "svd3_cuda.h" #include "stdio.h" using namespace std; using namespace cv; namespace fs = std::experimental::filesystem::v1; #define THREAD_MAX 4 #define Threads 32 void extract_LSBP(Mat frame, Mat &output, int tau); void video(string); void cdnet(string, int); void extract_LSBP_v2(Mat frame, Mat change_frame, Mat last_lsbp, Mat &output, int tau); Mat SVD_init(Mat frame, int samples); Mat SVD_step(Mat, int, int, int, double, double); double _SVD(arma::mat matriz);// return the singular values sum (s[1]+s[2])/s[0] int clip(int i, int inferior, int superior, int val_range); int Hamming_distance(Mat svd_frame, Mat svd_sample, int i, int j, double tau); void export_mat_excel(Mat img, string name); void update_samples_lsbp(); double get_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2); double min_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2); void init_change_lsbp(); void init_zeros_change_lsbp(); bool validate_change(float, float, float, float, float, float, float, float, float); void* LSBP_parallel(void* arg); void* LSBP1_parallel(void* arg); void* SVD_step_parallell(void * arg); void extract_LSBP_cuda(Mat frame, Mat &output); __global__ void add(float *m1, float *m3, int filas, int columnas) { int col = blockIdx.x*blockDim.x+threadIdx.x; int fil = blockIdx.y*blockDim.y+threadIdx.y; int col_min = col-1; int col_max = col+1; int fil_min = fil-1; int fil_max = fil+1; if(col < columnas && fil < filas && col_min >= 0 && fil_min >= 0 && fil_max < (filas) && col_max < (columnas)) { int indexi0j0 = (columnas*fil_min)+col_min; int indexi0j1 = (columnas*fil_min)+col; int indexi0j2 = (columnas*fil_min)+col_max; int indexi1j0 = (columnas*fil)+col_min; int indexij = (columnas*fil)+col; int indexi1j2 = (columnas*fil)+col_max; int indexi2j0 = (columnas*fil_max)+col_min; int indexi2j1 = (columnas*fil_max)+col; int indexi2j2 = (columnas*fil_max)+col_max; float u11, u12, u13, u21, u22, u23, u31, u32, u33; float s11, s12, s13, s21, s22, s23, s31, s32, s33; float v11, v12, v13, v21, v22, v23, v31, v32, v33; svd(m1[indexi0j0], m1[indexi0j1], m1[indexi0j2], m1[indexi1j0], m1[indexij], m1[indexi1j2], m1[indexi2j0], m1[indexi2j1], m1[indexi2j2], // output U u11, u12, u13, u21, u22, u23, u31, u32, u33, // output S s11, s12, s13, s21, s22, s23, s31, s32, s33, // output V v11, v12, v13, v21, v22, v23, v31, v32, v33); //m3[index] = m1[index]+m2[index]; /*m3[indexij] = m1[indexi0j0]+m1[indexi0j1]+m1[indexi0j2]+ m1[indexi1j0]+m1[indexij]+m1[indexi1j2]+ m1[indexi2j0]+m1[indexi2j1]+m1[indexi2j2];*/ //m3[indexij] = s11+s22+s33; m3[indexij] = (s22+s33)/s11; } } Mat global_intensity_fr, global_change_frame, global_last_lsbp, global_output; Mat global_frame, global_mask, global_svd; int part=0; Mat D, fr, lsbp; Mat R, T; list<Mat> samples_lsbp; list<Mat> samples_frame; list<Mat> samples_change_lsbp; int heigth, width; int main() { string pathVideoName = "sanPablo/SanPabloVideo3.mp4"; string pathCDnet = "office/"; bool isVideo = false; if(isVideo) video(pathVideoName); else { int startFrame = 570; cdnet(pathCDnet,startFrame); } return 0; } void video(string pathVideo) { string PATH = "peopleInShade/"; 
srand(time(NULL)); auto duration =0; int samples = 10; Mat img; //img = imread(PATH+"input/in000001.jpg", CV_LOAD_IMAGE_COLOR); VideoCapture cap(pathVideo); int i=0;// Mat ones = Mat::ones(2, 3, CV_32FC1)*0.2; namedWindow("imagen", WINDOW_AUTOSIZE); namedWindow("input", WINDOW_AUTOSIZE); //imshow("imagen", img); //Mat result = SVD_init(img, samples); waitKey(1); init_change_lsbp(); if(!cap.isOpened()) { cout << "Error opening video stream" << endl; } while(1) { Mat frame; cap >> frame; if(i==0) { heigth = frame.cols; width = frame.rows; R = Mat::ones(width, heigth, CV_32FC1)*30.0; D = Mat::ones(width, heigth, CV_32FC1)*0.0; T = Mat::ones(width, heigth, CV_32FC1)*0.08; cout << "heigth: " << R.cols << endl; cout << "width: " << width << endl; Mat result = SVD_init(frame, samples); } else { //cout << "=========: " << i << endl; //Only to read auto t11 = std::chrono::high_resolution_clock::now(); //waitKey(5000); //cout << "Step0" << endl; Mat result = SVD_step(frame, 6, 2, 5, 0.05, 0.02); imshow("imagen", result); imshow("input", frame); //cout << "Step1" << endl; char c=(char)waitKey(1); if(c==1) break; auto t12 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count(); cout << "Frame: " << i <<" Time: " << duration << endl; } i++; } } void cdnet(string PATH, int starFrame) { //string PATH = "highway/"; srand(time(NULL)); auto duration =0; int samples = 10; Mat img; img = imread(PATH+"input/in000001.jpg", CV_LOAD_IMAGE_COLOR); heigth = img.cols; width = img.rows; Mat ones = Mat::ones(2, 3, CV_32FC1)*0.2; R = Mat::ones(width, heigth, CV_32FC1)*30.0; D = Mat::ones(width, heigth, CV_32FC1)*0.0; T = Mat::ones(width, heigth, CV_32FC1)*0.08; namedWindow("imagen", WINDOW_AUTOSIZE); namedWindow("input", WINDOW_AUTOSIZE); imshow("imagen", img); Mat result = SVD_init(img, samples); waitKey(1); init_change_lsbp(); for(int f=starFrame; f<=1699; f++) { //Only to read if(f<10) img = imread(PATH+"input/in00000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else if(f<100) img = imread(PATH+"input/in0000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else if(f<1000) img = imread(PATH+"input/in000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else img = imread(PATH+"input/in00"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); auto t11 = std::chrono::high_resolution_clock::now(); Mat result = SVD_step(img, 6, 2, 5, 0.05, 0.02); imshow("imagen", result); imshow("input", img); waitKey(1); auto t12 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count(); cout << "Frame: " << f <<" Time: " << duration << endl; } } void export_mat_excel(Mat img, string name) { ofstream myfile; myfile.open(name+".csv"); for(int i=0; i<img.rows; i++) { for(int j=0; j<img.cols; j++) { if(i==2 && j==1) { cout << "print: " << ((Scalar)img.at<float>(i,j))[0] << endl; } myfile << ((Scalar)img.at<float>(i, j))[0]; myfile << ","; } myfile << "\n"; } myfile.close(); //waitKey(5000); } //Extrae la matriz de valores singulares SVD (s[1]+s[2])/s[0] //intensity_fr with 0 /*void extract_LSBP(Mat frame, Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } auto t11 = std::chrono::high_resolution_clock::now(); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; 
j<intensity_fr.cols-1; j++) { arma::mat m_svd; m_svd = {{((Scalar)intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i,j))[0], ((Scalar)intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j+1))[0]}}; r_lsbp.at<float>(i,j) = _SVD(m_svd); } } auto t12 = std::chrono::high_resolution_clock::now(); intensity_fr.release(); }*/ void extract_LSBP(Mat frame, Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } global_intensity_fr = intensity_fr.clone(); global_output = r_lsbp.clone(); part = 0; pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, LSBP1_parallel, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); r_lsbp = global_output.clone(); intensity_fr.release(); } void* LSBP1_parallel(void* arg) { int thread_part = part++; for(int k=1; k<((global_intensity_fr.rows-2)/THREAD_MAX)+1; k++) { int i = ((global_intensity_fr.rows-2)/THREAD_MAX)*thread_part+k; for(int j=1; j<global_intensity_fr.cols-1; j++) { arma::mat m_svd; m_svd = {{((Scalar)global_intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j+1))[0]}}; global_output.at<float>(i,j) = _SVD(m_svd); } } } Mat SVD_init(Mat frame, int samples) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(frame, svd, 0.05); /*ofstream myfile; myfile.open("example.csv"); for(int i=0; i<svd.rows; i++) { for(int j=0; j<svd.cols; j++) { if(i==2 && j==1) cout << "print: " << ((Scalar)svd.at<double>(i,j))[0] << endl; myfile << ((Scalar)svd.at<double>(i, j))[0]; myfile << ","; } myfile << "\n"; } imshow("imagen", svd); //cout << "-> " << ((Scalar)mask.at<uchar>(15, j))[0] << endl; myfile.close(); waitKey(5000);*/ samples_lsbp.push_back(svd); //cout << "Impr2" << endl; samples_frame.push_back(frame); //cout << "Impr3" << endl; int i0, j0; #pragma omp parallell for for(int k=1; k<samples;k++) { lsbp = svd.clone(); fr = frame.clone(); for(int i=0; i<frame.rows; i++) { for(int j=0; j<frame.cols; j++) { i0 = clip(i,10,frame.rows-10,10); j0 = clip(j,10,frame.cols-10,10); fr.at<Vec3b>(i0,j0) = frame.at<Vec3b>(i,j); } } extract_LSBP(fr, lsbp, 0.05); samples_lsbp.push_back(lsbp); samples_frame.push_back(fr); lsbp.release(); fr.release(); } return frame; } void init_zeros_change_lsbp() { Mat lsbp = Mat::zeros(width+2, heigth+2, CV_32FC1); list<Mat>::iterator next_lsbp; next_lsbp = samples_change_lsbp.begin(); while(next_lsbp != samples_change_lsbp.end()) { (*next_lsbp) = lsbp.clone(); next_lsbp++; } } void init_change_lsbp() { Mat lsbp = Mat::zeros(width+2, heigth+2, CV_32FC1); for(int i=0; i<10; i++) { samples_change_lsbp.push_back(lsbp); } } 
//threshold HR PY // matches threshold PY Mat SVD_step(Mat frame, int threshold=4, int matches=2, int Rscale=5, double Rlr=0.05, double Tlr=0.02) { //Mat svd_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_32FC1); Mat svd_fr = Mat::zeros(width+2, heigth+2, CV_32FC1); //auto t11 = std::chrono::high_resolution_clock::now(); //extract_LSBP(frame, svd_fr, 0.05); extract_LSBP_cuda(frame, svd_fr); //auto t12 = std::chrono::high_resolution_clock::now(); //cout << "LSBP STEP: "<<std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl; Mat mask = Mat::zeros(frame.rows, frame.cols, CV_8UC1); //Mat white = Mat::ones(1,1, CV_8UC1)*255; //#pragma omp parallell for auto t21 = std::chrono::high_resolution_clock::now(); for(int i=0; i<frame.rows; i++) { for(int j=0; j<frame.cols; j++) { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; double min_distance_sum = 0; //double L1_distance_min = 1000000; while(next_lsbp != samples_lsbp.end()) { double L1_distance = get_distance_L1(((Scalar)frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); double min_distance = min_distance_L1(((Scalar)frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); int d_hamming = Hamming_distance(svd_fr, *next_lsbp, i+1, j+1, 0.05); min_distance_sum += min_distance; //UPDATE R //D.at<float>(i,j) = L1_distance; //if((d_hamming < (threshold))) if((L1_distance < R.at<float>(i, j)) && (d_hamming < (threshold))) { samples_matches++; } next_frame++; next_lsbp++; } //UPDATE R(x) if(R.at<float>(i, j) > ((min_distance_sum/10)*Rscale)) { if(R.at<float>(i, j)*(1-Rlr) < 18) R.at<float>(i, j) = 18; else R.at<float>(i, j) = R.at<float>(i, j)*(1-Rlr); } else { if(R.at<float>(i, j)*(1+Rlr) > 500) R.at<float>(i, j) = 500; else R.at<float>(i, j) = R.at<float>(i, j)*(1+Rlr); } if(samples_matches < matches) { mask.at<uchar>(i, j) = 255;//White, Black 0 //UPDATE T(X) if(T.at<float>(i, j)+(1/(min_distance_sum/10)) < 200) T.at<float>(i, j) = T.at<float>(i, j)+(1/(min_distance_sum/10));//IF FOREGROUND else T.at<float>(i, j) = 200; } else { //UPDATE T(X) if(T.at<float>(i, j)-(0.05/(min_distance_sum/10)) > 2) T.at<float>(i, j) = T.at<float>(i, j)-(0.05/(min_distance_sum/10));//IF BACKGROUND else T.at<float>(i, j) = 2; //UPDATE B(X) because mask(i,j) is 0 if((rand()%200) < (200/T.at<float>(i, j))) { int random = rand()%10; list<Mat>::iterator next_frame_update; next_frame_update = samples_frame.begin(); list<Mat>::iterator next_lsbp_update; next_lsbp_update = samples_lsbp.begin(); for(int k=0; k<random; k++) { next_frame_update++; next_lsbp_update++; } (*next_frame_update).at<Vec3b>(i, j) = frame.at<Vec3b>(i,j); } } } } auto t22 = std::chrono::high_resolution_clock::now(); //cout << "COMPARE: "<<std::chrono::duration_cast<std::chrono::milliseconds>(t22 - t21).count() << endl; update_samples_lsbp(); init_zeros_change_lsbp(); return mask; } /*Mat SVD_step(Mat frame, int threshold=4, int matches=2, int Rscale=5, double Rlr=0.05, double Tlr=0.02) { //Mat svd_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_32FC1); Mat svd_fr = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(frame, 
svd_fr, 0.05); Mat mask = Mat::zeros(frame.rows, frame.cols, CV_8UC1); //Mat white = Mat::ones(1,1, CV_8UC1)*255; //#pragma omp parallell for global_frame = frame.clone(); global_svd = svd_fr.clone(); global_mask = mask.clone(); part = 0; pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, SVD_step_parallell, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); update_samples_lsbp(); init_zeros_change_lsbp(); mask = global_mask.clone(); return mask; } */ /*void* SVD_step_parallell(void * arg) { int threshold = 6; int matches = 2; int Rscale = 5; double Rlr = 0.05; double Tlr = 0.02; for(int k=0; k<global_frame.rows/THREAD_MAX; k++) { int i=(global_frame.rows)/THREAD_MAX+k; for(int j=0; j<global_frame.cols; j++) { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; double min_distance_sum = 0; //double L1_distance_min = 1000000; while(next_lsbp != samples_lsbp.end()) { double L1_distance = get_distance_L1(((Scalar)global_frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)global_frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)global_frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); double min_distance = min_distance_L1(((Scalar)global_frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)global_frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)global_frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); int d_hamming = Hamming_distance(global_svd, *next_lsbp, i+1, j+1, 0.05); min_distance_sum += min_distance; //UPDATE R D.at<float>(i,j) = L1_distance; //if((d_hamming < (threshold))) if((L1_distance < R.at<float>(i, j)) && (d_hamming < (threshold))) { samples_matches++; } next_frame++; next_lsbp++; } //UPDATE R(x) if(R.at<float>(i, j) > ((min_distance_sum/10)*Rscale)) { if(R.at<float>(i, j)*(1-Rlr) < 18) R.at<float>(i, j) = 18; else R.at<float>(i, j) = R.at<float>(i, j)*(1-Rlr); } else { if(R.at<float>(i, j)*(1+Rlr) > 500) R.at<float>(i, j) = 500; else R.at<float>(i, j) = R.at<float>(i, j)*(1+Rlr); } if(samples_matches < matches) { global_mask.at<uchar>(i, j) = 255;//White, Black 0 //UPDATE T(X) if(T.at<uchar>(i, j)+(1/(min_distance_sum/10)) < 200) T.at<uchar>(i, j) = T.at<uchar>(i, j)+(1/(min_distance_sum/10));//IF FOREGROUND else T.at<uchar>(i, j) = 200; } else { //UPDATE T(X) if(T.at<uchar>(i, j)-(0.05/(min_distance_sum/10)) > 2) T.at<uchar>(i, j) = T.at<uchar>(i, j)-(0.05/(min_distance_sum/10));//IF BACKGROUND else T.at<uchar>(i, j) = 2; //UPDATE B(X) because mask(i,j) is 0 if((rand()%200) < (200/T.at<uchar>(i, j))) { int random = rand()%10; list<Mat>::iterator next_frame_update; next_frame_update = samples_frame.begin(); list<Mat>::iterator next_lsbp_update; next_lsbp_update = samples_lsbp.begin(); list<Mat>::iterator next_change_lsbp_update; next_change_lsbp_update = samples_change_lsbp.begin(); for(int k=0; k<random; k++) { next_frame_update++; next_lsbp_update++; next_change_lsbp_update++; } (*next_frame_update).at<Vec3b>(i, j) = global_frame.at<Vec3b>(i,j); (*next_change_lsbp_update).at<float>(i+1, j+1) = 1; } } } } }*/ double get_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2) { return sqrt(pow((b1-b2),2)+pow((g1-g2),2)+pow((r1-r2),2)); } double min_distance_L1(double b1, double 
b2, double g1, double g2, double r1, double r2) { if(abs(b1-b2) < abs(g1-g2)) { if(abs(r1-r2) < abs(b1-b2)) { return abs(r1-r2); } else return abs(b1-b2); } else { if(abs(r1-r2) < abs(g1-g2)) { return abs(r1-r2); } else return abs(g1-g2); } } /*void update_samples_lsbp() { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; while(next_lsbp != samples_lsbp.end()) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(*next_frame, svd, 0.05); *next_lsbp = svd.clone(); next_frame++; next_lsbp++; } } */ /*void extract_LSBP_v2(Mat frame, Mat frame_change, Mat last_lsbp,Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { if(validate_change(frame_change.at<float>(i-1,j-1), frame_change.at<float>(i-1,j), frame_change.at<float>(i-1,j+1), frame_change.at<float>(i,j-1), frame_change.at<float>(i,j), frame_change.at<float>(i,j+1), frame_change.at<float>(i+1,j-1), frame_change.at<float>(i+1,j), frame_change.at<float>(i+1,j+1))) { arma::mat m_svd; m_svd = {{((Scalar)intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i,j))[0], ((Scalar)intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j+1))[0]}}; r_lsbp.at<float>(i,j) = _SVD(m_svd); } else { r_lsbp.at<float>(i,j) = last_lsbp.at<float>(i,j); } } } intensity_fr.release(); }*/ void extract_LSBP_v2(Mat frame, Mat frame_change, Mat last_lsbp,Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } part=0; global_intensity_fr = intensity_fr.clone(); global_change_frame = frame_change.clone(); global_last_lsbp = last_lsbp.clone(); global_output = r_lsbp.clone(); pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, LSBP_parallel, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); r_lsbp = global_output.clone(); intensity_fr.release(); } void* LSBP_parallel(void* arg) { int thread_part = part++; /*for(int i=0; i<global_mat.rows/THREAD_MAX; i++) { for(int j=0; j<global_mat.cols; j++) { global_mat.at<float>((global_mat.rows/THREAD_MAX)*thread_part+i,j) = 1; } }*/ for(int k=1; k<((global_intensity_fr.rows-2)/THREAD_MAX)+1; k++) { int i = ((global_intensity_fr.rows-2)/THREAD_MAX)*thread_part+k; //cout << "thread: " << thread_part <<" i: " << i << endl; for(int j=1; j<global_intensity_fr.cols-1; j++) { if(validate_change(global_change_frame.at<float>(i-1,j-1), global_change_frame.at<float>(i-1,j), global_change_frame.at<float>(i-1,j+1), global_change_frame.at<float>(i,j-1), global_change_frame.at<float>(i,j), global_change_frame.at<float>(i,j+1), global_change_frame.at<float>(i+1,j-1), 
global_change_frame.at<float>(i+1,j), global_change_frame.at<float>(i+1,j+1))) { arma::mat m_svd; m_svd = {{((Scalar)global_intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j+1))[0]}}; global_output.at<float>(i,j) = _SVD(m_svd); } else { global_output.at<float>(i,j) = global_last_lsbp.at<float>(i,j); } } } } bool validate_change(float i00, float i01, float i02, float i10, float i11, float i12, float i20, float i21, float i22) { if(i00 == 1 || i01 == 1 ||i02 == 1 ||i10 == 1 ||i11 == 1 ||i12 == 1 ||i20 == 1 ||i21 == 1 ||i22 == 1) return true; else return false; } void update_samples_lsbp() { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); list<Mat>::iterator next_lsbp_change; next_lsbp_change = samples_change_lsbp.begin(); //int samples_matches = 0; while(next_lsbp != samples_lsbp.end()) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); //extract_LSBP_v2(*next_frame, *next_lsbp_change, *next_lsbp,svd, 0.05); extract_LSBP_cuda(*next_frame, svd); //extract_LSBP(*next_frame, svd, 0.05); *next_lsbp = svd.clone(); next_lsbp_change++; next_frame++; next_lsbp++; } } int Hamming_distance(Mat svd_frame, Mat svd_sample, int i, int j, double tau) { int hamming = 0; //if((abs((svd_frame.at<double>(i,j))-(svd_frame.at<double>(i-1,j-1))) < tau)) if((abs((svd_frame.at<float>(i,j))-(svd_frame.at<float>(i-1,j-1))) < tau) != (abs((svd_sample.at<float>(i,j))-(svd_sample.at<float>(i-1,j-1))) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i-1,j)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i-1,j)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i-1,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i-1,j+1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i,j-1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i,j-1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i,j+1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j-1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j-1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j+1)) < tau)) { hamming++; } return hamming; } //return calcular los valores singulares y retorna (s[2]+s[1])/s[0] double _SVD(arma::mat matriz) { arma::mat U2, V2; arma::vec w2; arma::svd(U2, w2, V2, matriz); return ((w2[2]+w2[1])/w2[0]); } int clip(int i, int inferior, int superior, int val_range) { int i0; if(i<inferior) { i0 = rand()%val_range-rand()%val_range+inferior; } else { if(i>superior) { i0 = rand()%val_range-rand()%val_range+superior; } else { i0 = rand()%val_range-rand()%val_range+i; } } return i0; } void extract_LSBP_cuda(Mat frame, Mat &output) { Mat intensity; 
cvtColor(frame, intensity, COLOR_BGR2GRAY); //Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); /*for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } }*/ //int filas = 1080; //int columnas = 1920; int filas = frame.rows+2; int columnas = frame.cols+2; float a[filas][columnas]; float *dev_a; float *dev_c; for(int i=1; i<filas-1; i++) { //cont = 0; for(int j=1; j<columnas-1; j++) { a[i][j] = ((Scalar)intensity.at<uchar>(i-1, j-1))[0]; } } // auto t11 = std::chrono::high_resolution_clock::now(); hipMalloc((void**) &dev_a, filas*columnas*sizeof(float)); hipMalloc((void**) &dev_c, filas*columnas*sizeof(float)); hipMemcpy(dev_a, a, filas*columnas*sizeof(float), hipMemcpyHostToDevice); dim3 dimThreadsBloque(Threads, Threads); float BFloat = (float) columnas / (float) Threads; int B = (int) ceil(BFloat); // El grid tendrá B número de bloques en x y y dim3 dimBloques(B, B); hipLaunchKernelGGL(( add), dim3(dimBloques), dim3(dimThreadsBloque), 0, 0, dev_a, dev_c, filas, columnas); hipMemcpy(a, dev_c, filas*columnas*sizeof(float), hipMemcpyDeviceToHost); //auto t12 = std::chrono::high_resolution_clock::now(); hipFree(dev_a); hipFree(dev_c); for(int i=0; i<filas; i++) { for(int j=0; j<columnas; j++) { output.at<float>(i,j) = a[i][j]; } } //cout << std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl; }
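Hamming_distance in the file above accumulates the comparison of the eight LSBP neighbour tests through eight separate if-statements. A common, more compact formulation packs the eight threshold tests into one byte per pixel, so the distance becomes a popcount of an XOR. The helper below is only an illustration of that idea, not code from the project; apart from svd_frame, svd_sample, i, j and tau, all names are made up.

// Compact alternative to Hamming_distance (sketch, not from the source files).
#include <opencv2/core.hpp>
#include <cmath>

static inline unsigned char lsbp_byte(const cv::Mat &svd, int i, int j, float tau) {
    static const int di[8] = {-1,-1,-1, 0, 0, 1, 1, 1};
    static const int dj[8] = {-1, 0, 1,-1, 1,-1, 0, 1};
    unsigned char code = 0;
    const float c = svd.at<float>(i, j);
    for (int k = 0; k < 8; ++k)
        if (std::fabs(c - svd.at<float>(i + di[k], j + dj[k])) < tau)
            code |= (unsigned char)(1u << k);   // bit k set when the neighbour passes the tau test
    return code;
}

static inline int hamming_lsbp(const cv::Mat &svd_frame, const cv::Mat &svd_sample,
                               int i, int j, float tau) {
    // Popcount of the XOR counts the positions where the two patterns disagree,
    // which is exactly what the original chain of if-statements accumulates.
    // __builtin_popcount is the GCC/Clang builtin.
    return __builtin_popcount((unsigned)(lsbp_byte(svd_frame, i, j, tau) ^
                                         lsbp_byte(svd_sample, i, j, tau)));
}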
e69b666110d25ac93023e77f3357351f501b68ec.cu
#include <iostream> #include <string.h> #include <experimental/filesystem> #include <list> #include "opencv2/imgcodecs.hpp" #include "opencv2/highgui.hpp" #include "opencv2/imgproc.hpp" #include <armadillo> #include <chrono> #include <time.h> #include <omp.h> #include "svd3_cuda.h" #include "stdio.h" using namespace std; using namespace cv; namespace fs = std::experimental::filesystem::v1; #define THREAD_MAX 4 #define Threads 32 void extract_LSBP(Mat frame, Mat &output, int tau); void video(string); void cdnet(string, int); void extract_LSBP_v2(Mat frame, Mat change_frame, Mat last_lsbp, Mat &output, int tau); Mat SVD_init(Mat frame, int samples); Mat SVD_step(Mat, int, int, int, double, double); double _SVD(arma::mat matriz);// return the singular values sum (s[1]+s[2])/s[0] int clip(int i, int inferior, int superior, int val_range); int Hamming_distance(Mat svd_frame, Mat svd_sample, int i, int j, double tau); void export_mat_excel(Mat img, string name); void update_samples_lsbp(); double get_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2); double min_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2); void init_change_lsbp(); void init_zeros_change_lsbp(); bool validate_change(float, float, float, float, float, float, float, float, float); void* LSBP_parallel(void* arg); void* LSBP1_parallel(void* arg); void* SVD_step_parallell(void * arg); void extract_LSBP_cuda(Mat frame, Mat &output); __global__ void add(float *m1, float *m3, int filas, int columnas) { int col = blockIdx.x*blockDim.x+threadIdx.x; int fil = blockIdx.y*blockDim.y+threadIdx.y; int col_min = col-1; int col_max = col+1; int fil_min = fil-1; int fil_max = fil+1; if(col < columnas && fil < filas && col_min >= 0 && fil_min >= 0 && fil_max < (filas) && col_max < (columnas)) { int indexi0j0 = (columnas*fil_min)+col_min; int indexi0j1 = (columnas*fil_min)+col; int indexi0j2 = (columnas*fil_min)+col_max; int indexi1j0 = (columnas*fil)+col_min; int indexij = (columnas*fil)+col; int indexi1j2 = (columnas*fil)+col_max; int indexi2j0 = (columnas*fil_max)+col_min; int indexi2j1 = (columnas*fil_max)+col; int indexi2j2 = (columnas*fil_max)+col_max; float u11, u12, u13, u21, u22, u23, u31, u32, u33; float s11, s12, s13, s21, s22, s23, s31, s32, s33; float v11, v12, v13, v21, v22, v23, v31, v32, v33; svd(m1[indexi0j0], m1[indexi0j1], m1[indexi0j2], m1[indexi1j0], m1[indexij], m1[indexi1j2], m1[indexi2j0], m1[indexi2j1], m1[indexi2j2], // output U u11, u12, u13, u21, u22, u23, u31, u32, u33, // output S s11, s12, s13, s21, s22, s23, s31, s32, s33, // output V v11, v12, v13, v21, v22, v23, v31, v32, v33); //m3[index] = m1[index]+m2[index]; /*m3[indexij] = m1[indexi0j0]+m1[indexi0j1]+m1[indexi0j2]+ m1[indexi1j0]+m1[indexij]+m1[indexi1j2]+ m1[indexi2j0]+m1[indexi2j1]+m1[indexi2j2];*/ //m3[indexij] = s11+s22+s33; m3[indexij] = (s22+s33)/s11; } } Mat global_intensity_fr, global_change_frame, global_last_lsbp, global_output; Mat global_frame, global_mask, global_svd; int part=0; Mat D, fr, lsbp; Mat R, T; list<Mat> samples_lsbp; list<Mat> samples_frame; list<Mat> samples_change_lsbp; int heigth, width; int main() { string pathVideoName = "sanPablo/SanPabloVideo3.mp4"; string pathCDnet = "office/"; bool isVideo = false; if(isVideo) video(pathVideoName); else { int startFrame = 570; cdnet(pathCDnet,startFrame); } return 0; } void video(string pathVideo) { string PATH = "peopleInShade/"; srand(time(NULL)); auto duration =0; int samples = 10; Mat img; //img = imread(PATH+"input/in000001.jpg", 
CV_LOAD_IMAGE_COLOR); VideoCapture cap(pathVideo); int i=0;// Mat ones = Mat::ones(2, 3, CV_32FC1)*0.2; namedWindow("imagen", WINDOW_AUTOSIZE); namedWindow("input", WINDOW_AUTOSIZE); //imshow("imagen", img); //Mat result = SVD_init(img, samples); waitKey(1); init_change_lsbp(); if(!cap.isOpened()) { cout << "Error opening video stream" << endl; } while(1) { Mat frame; cap >> frame; if(i==0) { heigth = frame.cols; width = frame.rows; R = Mat::ones(width, heigth, CV_32FC1)*30.0; D = Mat::ones(width, heigth, CV_32FC1)*0.0; T = Mat::ones(width, heigth, CV_32FC1)*0.08; cout << "heigth: " << R.cols << endl; cout << "width: " << width << endl; Mat result = SVD_init(frame, samples); } else { //cout << "=========: " << i << endl; //Only to read auto t11 = std::chrono::high_resolution_clock::now(); //waitKey(5000); //cout << "Step0" << endl; Mat result = SVD_step(frame, 6, 2, 5, 0.05, 0.02); imshow("imagen", result); imshow("input", frame); //cout << "Step1" << endl; char c=(char)waitKey(1); if(c==1) break; auto t12 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count(); cout << "Frame: " << i <<" Time: " << duration << endl; } i++; } } void cdnet(string PATH, int starFrame) { //string PATH = "highway/"; srand(time(NULL)); auto duration =0; int samples = 10; Mat img; img = imread(PATH+"input/in000001.jpg", CV_LOAD_IMAGE_COLOR); heigth = img.cols; width = img.rows; Mat ones = Mat::ones(2, 3, CV_32FC1)*0.2; R = Mat::ones(width, heigth, CV_32FC1)*30.0; D = Mat::ones(width, heigth, CV_32FC1)*0.0; T = Mat::ones(width, heigth, CV_32FC1)*0.08; namedWindow("imagen", WINDOW_AUTOSIZE); namedWindow("input", WINDOW_AUTOSIZE); imshow("imagen", img); Mat result = SVD_init(img, samples); waitKey(1); init_change_lsbp(); for(int f=starFrame; f<=1699; f++) { //Only to read if(f<10) img = imread(PATH+"input/in00000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else if(f<100) img = imread(PATH+"input/in0000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else if(f<1000) img = imread(PATH+"input/in000"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); else img = imread(PATH+"input/in00"+to_string(f)+".jpg", CV_LOAD_IMAGE_COLOR); auto t11 = std::chrono::high_resolution_clock::now(); Mat result = SVD_step(img, 6, 2, 5, 0.05, 0.02); imshow("imagen", result); imshow("input", img); waitKey(1); auto t12 = std::chrono::high_resolution_clock::now(); duration = std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count(); cout << "Frame: " << f <<" Time: " << duration << endl; } } void export_mat_excel(Mat img, string name) { ofstream myfile; myfile.open(name+".csv"); for(int i=0; i<img.rows; i++) { for(int j=0; j<img.cols; j++) { if(i==2 && j==1) { cout << "print: " << ((Scalar)img.at<float>(i,j))[0] << endl; } myfile << ((Scalar)img.at<float>(i, j))[0]; myfile << ","; } myfile << "\n"; } myfile.close(); //waitKey(5000); } //Extrae la matriz de valores singulares SVD (s[1]+s[2])/s[0] //intensity_fr with 0 /*void extract_LSBP(Mat frame, Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } auto t11 = std::chrono::high_resolution_clock::now(); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { arma::mat m_svd; m_svd = {{((Scalar)intensity_fr.at<uchar>(i-1,j-1))[0], 
((Scalar)intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i,j))[0], ((Scalar)intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j+1))[0]}}; r_lsbp.at<float>(i,j) = _SVD(m_svd); } } auto t12 = std::chrono::high_resolution_clock::now(); intensity_fr.release(); }*/ void extract_LSBP(Mat frame, Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } global_intensity_fr = intensity_fr.clone(); global_output = r_lsbp.clone(); part = 0; pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, LSBP1_parallel, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); r_lsbp = global_output.clone(); intensity_fr.release(); } void* LSBP1_parallel(void* arg) { int thread_part = part++; for(int k=1; k<((global_intensity_fr.rows-2)/THREAD_MAX)+1; k++) { int i = ((global_intensity_fr.rows-2)/THREAD_MAX)*thread_part+k; for(int j=1; j<global_intensity_fr.cols-1; j++) { arma::mat m_svd; m_svd = {{((Scalar)global_intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j+1))[0]}}; global_output.at<float>(i,j) = _SVD(m_svd); } } } Mat SVD_init(Mat frame, int samples) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(frame, svd, 0.05); /*ofstream myfile; myfile.open("example.csv"); for(int i=0; i<svd.rows; i++) { for(int j=0; j<svd.cols; j++) { if(i==2 && j==1) cout << "print: " << ((Scalar)svd.at<double>(i,j))[0] << endl; myfile << ((Scalar)svd.at<double>(i, j))[0]; myfile << ","; } myfile << "\n"; } imshow("imagen", svd); //cout << "-> " << ((Scalar)mask.at<uchar>(15, j))[0] << endl; myfile.close(); waitKey(5000);*/ samples_lsbp.push_back(svd); //cout << "Impr2" << endl; samples_frame.push_back(frame); //cout << "Impr3" << endl; int i0, j0; #pragma omp parallell for for(int k=1; k<samples;k++) { lsbp = svd.clone(); fr = frame.clone(); for(int i=0; i<frame.rows; i++) { for(int j=0; j<frame.cols; j++) { i0 = clip(i,10,frame.rows-10,10); j0 = clip(j,10,frame.cols-10,10); fr.at<Vec3b>(i0,j0) = frame.at<Vec3b>(i,j); } } extract_LSBP(fr, lsbp, 0.05); samples_lsbp.push_back(lsbp); samples_frame.push_back(fr); lsbp.release(); fr.release(); } return frame; } void init_zeros_change_lsbp() { Mat lsbp = Mat::zeros(width+2, heigth+2, CV_32FC1); list<Mat>::iterator next_lsbp; next_lsbp = samples_change_lsbp.begin(); while(next_lsbp != samples_change_lsbp.end()) { (*next_lsbp) = lsbp.clone(); next_lsbp++; } } void init_change_lsbp() { Mat lsbp = Mat::zeros(width+2, heigth+2, CV_32FC1); for(int i=0; i<10; i++) { samples_change_lsbp.push_back(lsbp); } } //threshold HR PY // matches threshold PY Mat SVD_step(Mat frame, int threshold=4, int matches=2, int Rscale=5, 
double Rlr=0.05, double Tlr=0.02) { //Mat svd_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_32FC1); Mat svd_fr = Mat::zeros(width+2, heigth+2, CV_32FC1); //auto t11 = std::chrono::high_resolution_clock::now(); //extract_LSBP(frame, svd_fr, 0.05); extract_LSBP_cuda(frame, svd_fr); //auto t12 = std::chrono::high_resolution_clock::now(); //cout << "LSBP STEP: "<<std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl; Mat mask = Mat::zeros(frame.rows, frame.cols, CV_8UC1); //Mat white = Mat::ones(1,1, CV_8UC1)*255; //#pragma omp parallell for auto t21 = std::chrono::high_resolution_clock::now(); for(int i=0; i<frame.rows; i++) { for(int j=0; j<frame.cols; j++) { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; double min_distance_sum = 0; //double L1_distance_min = 1000000; while(next_lsbp != samples_lsbp.end()) { double L1_distance = get_distance_L1(((Scalar)frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); double min_distance = min_distance_L1(((Scalar)frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); int d_hamming = Hamming_distance(svd_fr, *next_lsbp, i+1, j+1, 0.05); min_distance_sum += min_distance; //UPDATE R //D.at<float>(i,j) = L1_distance; //if((d_hamming < (threshold))) if((L1_distance < R.at<float>(i, j)) && (d_hamming < (threshold))) { samples_matches++; } next_frame++; next_lsbp++; } //UPDATE R(x) if(R.at<float>(i, j) > ((min_distance_sum/10)*Rscale)) { if(R.at<float>(i, j)*(1-Rlr) < 18) R.at<float>(i, j) = 18; else R.at<float>(i, j) = R.at<float>(i, j)*(1-Rlr); } else { if(R.at<float>(i, j)*(1+Rlr) > 500) R.at<float>(i, j) = 500; else R.at<float>(i, j) = R.at<float>(i, j)*(1+Rlr); } if(samples_matches < matches) { mask.at<uchar>(i, j) = 255;//White, Black 0 //UPDATE T(X) if(T.at<float>(i, j)+(1/(min_distance_sum/10)) < 200) T.at<float>(i, j) = T.at<float>(i, j)+(1/(min_distance_sum/10));//IF FOREGROUND else T.at<float>(i, j) = 200; } else { //UPDATE T(X) if(T.at<float>(i, j)-(0.05/(min_distance_sum/10)) > 2) T.at<float>(i, j) = T.at<float>(i, j)-(0.05/(min_distance_sum/10));//IF BACKGROUND else T.at<float>(i, j) = 2; //UPDATE B(X) because mask(i,j) is 0 if((rand()%200) < (200/T.at<float>(i, j))) { int random = rand()%10; list<Mat>::iterator next_frame_update; next_frame_update = samples_frame.begin(); list<Mat>::iterator next_lsbp_update; next_lsbp_update = samples_lsbp.begin(); for(int k=0; k<random; k++) { next_frame_update++; next_lsbp_update++; } (*next_frame_update).at<Vec3b>(i, j) = frame.at<Vec3b>(i,j); } } } } auto t22 = std::chrono::high_resolution_clock::now(); //cout << "COMPARE: "<<std::chrono::duration_cast<std::chrono::milliseconds>(t22 - t21).count() << endl; update_samples_lsbp(); init_zeros_change_lsbp(); return mask; } /*Mat SVD_step(Mat frame, int threshold=4, int matches=2, int Rscale=5, double Rlr=0.05, double Tlr=0.02) { //Mat svd_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_32FC1); Mat svd_fr = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(frame, svd_fr, 0.05); Mat mask = Mat::zeros(frame.rows, frame.cols, CV_8UC1); //Mat white = Mat::ones(1,1, 
CV_8UC1)*255; //#pragma omp parallell for global_frame = frame.clone(); global_svd = svd_fr.clone(); global_mask = mask.clone(); part = 0; pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, SVD_step_parallell, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); update_samples_lsbp(); init_zeros_change_lsbp(); mask = global_mask.clone(); return mask; } */ /*void* SVD_step_parallell(void * arg) { int threshold = 6; int matches = 2; int Rscale = 5; double Rlr = 0.05; double Tlr = 0.02; for(int k=0; k<global_frame.rows/THREAD_MAX; k++) { int i=(global_frame.rows)/THREAD_MAX+k; for(int j=0; j<global_frame.cols; j++) { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; double min_distance_sum = 0; //double L1_distance_min = 1000000; while(next_lsbp != samples_lsbp.end()) { double L1_distance = get_distance_L1(((Scalar)global_frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)global_frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)global_frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); double min_distance = min_distance_L1(((Scalar)global_frame.at<Vec3b>(i, j))[0], ((Scalar)(*next_frame).at<Vec3b>(i, j))[0], ((Scalar)global_frame.at<Vec3b>(i, j))[1], ((Scalar)(*next_frame).at<Vec3b>(i, j))[1], ((Scalar)global_frame.at<Vec3b>(i, j))[2], ((Scalar)(*next_frame).at<Vec3b>(i, j))[2]); int d_hamming = Hamming_distance(global_svd, *next_lsbp, i+1, j+1, 0.05); min_distance_sum += min_distance; //UPDATE R D.at<float>(i,j) = L1_distance; //if((d_hamming < (threshold))) if((L1_distance < R.at<float>(i, j)) && (d_hamming < (threshold))) { samples_matches++; } next_frame++; next_lsbp++; } //UPDATE R(x) if(R.at<float>(i, j) > ((min_distance_sum/10)*Rscale)) { if(R.at<float>(i, j)*(1-Rlr) < 18) R.at<float>(i, j) = 18; else R.at<float>(i, j) = R.at<float>(i, j)*(1-Rlr); } else { if(R.at<float>(i, j)*(1+Rlr) > 500) R.at<float>(i, j) = 500; else R.at<float>(i, j) = R.at<float>(i, j)*(1+Rlr); } if(samples_matches < matches) { global_mask.at<uchar>(i, j) = 255;//White, Black 0 //UPDATE T(X) if(T.at<uchar>(i, j)+(1/(min_distance_sum/10)) < 200) T.at<uchar>(i, j) = T.at<uchar>(i, j)+(1/(min_distance_sum/10));//IF FOREGROUND else T.at<uchar>(i, j) = 200; } else { //UPDATE T(X) if(T.at<uchar>(i, j)-(0.05/(min_distance_sum/10)) > 2) T.at<uchar>(i, j) = T.at<uchar>(i, j)-(0.05/(min_distance_sum/10));//IF BACKGROUND else T.at<uchar>(i, j) = 2; //UPDATE B(X) because mask(i,j) is 0 if((rand()%200) < (200/T.at<uchar>(i, j))) { int random = rand()%10; list<Mat>::iterator next_frame_update; next_frame_update = samples_frame.begin(); list<Mat>::iterator next_lsbp_update; next_lsbp_update = samples_lsbp.begin(); list<Mat>::iterator next_change_lsbp_update; next_change_lsbp_update = samples_change_lsbp.begin(); for(int k=0; k<random; k++) { next_frame_update++; next_lsbp_update++; next_change_lsbp_update++; } (*next_frame_update).at<Vec3b>(i, j) = global_frame.at<Vec3b>(i,j); (*next_change_lsbp_update).at<float>(i+1, j+1) = 1; } } } } }*/ double get_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2) { return sqrt(pow((b1-b2),2)+pow((g1-g2),2)+pow((r1-r2),2)); } double min_distance_L1(double b1, double b2, double g1, double g2, double r1, double r2) { if(abs(b1-b2) < abs(g1-g2)) { if(abs(r1-r2) < 
abs(b1-b2)) { return abs(r1-r2); } else return abs(b1-b2); } else { if(abs(r1-r2) < abs(g1-g2)) { return abs(r1-r2); } else return abs(g1-g2); } } /*void update_samples_lsbp() { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); int samples_matches = 0; while(next_lsbp != samples_lsbp.end()) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); extract_LSBP(*next_frame, svd, 0.05); *next_lsbp = svd.clone(); next_frame++; next_lsbp++; } } */ /*void extract_LSBP_v2(Mat frame, Mat frame_change, Mat last_lsbp,Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { if(validate_change(frame_change.at<float>(i-1,j-1), frame_change.at<float>(i-1,j), frame_change.at<float>(i-1,j+1), frame_change.at<float>(i,j-1), frame_change.at<float>(i,j), frame_change.at<float>(i,j+1), frame_change.at<float>(i+1,j-1), frame_change.at<float>(i+1,j), frame_change.at<float>(i+1,j+1))) { arma::mat m_svd; m_svd = {{((Scalar)intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i,j))[0], ((Scalar)intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)intensity_fr.at<uchar>(i+1,j+1))[0]}}; r_lsbp.at<float>(i,j) = _SVD(m_svd); } else { r_lsbp.at<float>(i,j) = last_lsbp.at<float>(i,j); } } } intensity_fr.release(); }*/ void extract_LSBP_v2(Mat frame, Mat frame_change, Mat last_lsbp,Mat &r_lsbp, int tau=0.05) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); for(int i=1; i<intensity_fr.rows-1; i++) { for(int j=1; j<intensity_fr.cols-1; j++) { intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1); } } part=0; global_intensity_fr = intensity_fr.clone(); global_change_frame = frame_change.clone(); global_last_lsbp = last_lsbp.clone(); global_output = r_lsbp.clone(); pthread_t threads[THREAD_MAX]; for (int i = 0; i < THREAD_MAX; i++) pthread_create(&threads[i], NULL, LSBP_parallel, (void*)NULL); //quick_sort(aa.begin(), aa.end()); for (int i = 0; i < THREAD_MAX; i++) pthread_join(threads[i], NULL); r_lsbp = global_output.clone(); intensity_fr.release(); } void* LSBP_parallel(void* arg) { int thread_part = part++; /*for(int i=0; i<global_mat.rows/THREAD_MAX; i++) { for(int j=0; j<global_mat.cols; j++) { global_mat.at<float>((global_mat.rows/THREAD_MAX)*thread_part+i,j) = 1; } }*/ for(int k=1; k<((global_intensity_fr.rows-2)/THREAD_MAX)+1; k++) { int i = ((global_intensity_fr.rows-2)/THREAD_MAX)*thread_part+k; //cout << "thread: " << thread_part <<" i: " << i << endl; for(int j=1; j<global_intensity_fr.cols-1; j++) { if(validate_change(global_change_frame.at<float>(i-1,j-1), global_change_frame.at<float>(i-1,j), global_change_frame.at<float>(i-1,j+1), global_change_frame.at<float>(i,j-1), global_change_frame.at<float>(i,j), global_change_frame.at<float>(i,j+1), global_change_frame.at<float>(i+1,j-1), global_change_frame.at<float>(i+1,j), global_change_frame.at<float>(i+1,j+1))) { arma::mat m_svd; m_svd = 
{{((Scalar)global_intensity_fr.at<uchar>(i-1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i-1,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i,j+1))[0]}, {((Scalar)global_intensity_fr.at<uchar>(i+1,j-1))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j))[0], ((Scalar)global_intensity_fr.at<uchar>(i+1,j+1))[0]}}; global_output.at<float>(i,j) = _SVD(m_svd); } else { global_output.at<float>(i,j) = global_last_lsbp.at<float>(i,j); } } } } bool validate_change(float i00, float i01, float i02, float i10, float i11, float i12, float i20, float i21, float i22) { if(i00 == 1 || i01 == 1 ||i02 == 1 ||i10 == 1 ||i11 == 1 ||i12 == 1 ||i20 == 1 ||i21 == 1 ||i22 == 1) return true; else return false; } void update_samples_lsbp() { list<Mat>::iterator next_frame; next_frame = samples_frame.begin(); list<Mat>::iterator next_lsbp; next_lsbp = samples_lsbp.begin(); list<Mat>::iterator next_lsbp_change; next_lsbp_change = samples_change_lsbp.begin(); //int samples_matches = 0; while(next_lsbp != samples_lsbp.end()) { Mat svd = Mat::zeros(width+2, heigth+2, CV_32FC1); //extract_LSBP_v2(*next_frame, *next_lsbp_change, *next_lsbp,svd, 0.05); extract_LSBP_cuda(*next_frame, svd); //extract_LSBP(*next_frame, svd, 0.05); *next_lsbp = svd.clone(); next_lsbp_change++; next_frame++; next_lsbp++; } } int Hamming_distance(Mat svd_frame, Mat svd_sample, int i, int j, double tau) { int hamming = 0; //if((abs((svd_frame.at<double>(i,j))-(svd_frame.at<double>(i-1,j-1))) < tau)) if((abs((svd_frame.at<float>(i,j))-(svd_frame.at<float>(i-1,j-1))) < tau) != (abs((svd_sample.at<float>(i,j))-(svd_sample.at<float>(i-1,j-1))) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i-1,j)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i-1,j)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i-1,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i-1,j+1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i,j-1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i,j-1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i,j+1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j-1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j-1)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j)) < tau)) { hamming++; } if((abs(svd_frame.at<float>(i,j)-svd_frame.at<float>(i+1,j+1)) < tau) != (abs(svd_sample.at<float>(i,j)-svd_sample.at<float>(i+1,j+1)) < tau)) { hamming++; } return hamming; } //return calcular los valores singulares y retorna (s[2]+s[1])/s[0] double _SVD(arma::mat matriz) { arma::mat U2, V2; arma::vec w2; arma::svd(U2, w2, V2, matriz); return ((w2[2]+w2[1])/w2[0]); } int clip(int i, int inferior, int superior, int val_range) { int i0; if(i<inferior) { i0 = rand()%val_range-rand()%val_range+inferior; } else { if(i>superior) { i0 = rand()%val_range-rand()%val_range+superior; } else { i0 = rand()%val_range-rand()%val_range+i; } } return i0; } void extract_LSBP_cuda(Mat frame, Mat &output) { Mat intensity; cvtColor(frame, intensity, COLOR_BGR2GRAY); //Mat intensity_fr = Mat::zeros(frame.rows+2, frame.cols+2, CV_8UC1); 
    /*for(int i=1; i<intensity_fr.rows-1; i++)
    {
        for(int j=1; j<intensity_fr.cols-1; j++)
        {
            intensity_fr.at<uchar>(i,j) = intensity.at<uchar>(i-1,j-1);
        }
    }*/

    //int filas = 1080;
    //int columnas = 1920;
    int filas = frame.rows+2;
    int columnas = frame.cols+2;
    float a[filas][columnas];
    float *dev_a;
    float *dev_c;

    for(int i=1; i<filas-1; i++)
    {
        //cont = 0;
        for(int j=1; j<columnas-1; j++)
        {
            a[i][j] = ((Scalar)intensity.at<uchar>(i-1, j-1))[0];
        }
    }

    // auto t11 = std::chrono::high_resolution_clock::now();
    cudaMalloc((void**) &dev_a, filas*columnas*sizeof(float));
    cudaMalloc((void**) &dev_c, filas*columnas*sizeof(float));
    cudaMemcpy(dev_a, a, filas*columnas*sizeof(float), cudaMemcpyHostToDevice);

    dim3 dimThreadsBloque(Threads, Threads);
    float BFloat = (float) columnas / (float) Threads;
    int B = (int) ceil(BFloat);
    // The grid will have B blocks in x and y
    dim3 dimBloques(B, B);

    add<<<dimBloques, dimThreadsBloque>>>(dev_a, dev_c, filas, columnas);

    cudaMemcpy(a, dev_c, filas*columnas*sizeof(float), cudaMemcpyDeviceToHost);
    //auto t12 = std::chrono::high_resolution_clock::now();

    cudaFree(dev_a);
    cudaFree(dev_c);

    for(int i=0; i<filas; i++)
    {
        for(int j=0; j<columnas; j++)
        {
            output.at<float>(i,j) = a[i][j];
        }
    }
    //cout << std::chrono::duration_cast<std::chrono::milliseconds>(t12 - t11).count() << endl;
}
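Editor's note: extract_LSBP_cuda above never checks the return codes of its cudaMalloc/cudaMemcpy calls or the status of the add<<<...>>> launch, and the stack-allocated VLA float a[filas][columnas] reaches roughly 8 MB for the commented-out 1080x1920 frame size, so failures are plausible. A minimal, self-contained error-checking helper those calls could be wrapped in (an illustrative sketch, not part of the original file; the macro name is made up):

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Abort with a readable message if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",                \
                         cudaGetErrorString(err_), __FILE__, __LINE__);     \
            std::exit(EXIT_FAILURE);                                        \
        }                                                                   \
    } while (0)

// Usage inside extract_LSBP_cuda would look like:
//   CUDA_CHECK(cudaMalloc((void**)&dev_a, filas * columnas * sizeof(float)));
//   add<<<dimBloques, dimThreadsBloque>>>(dev_a, dev_c, filas, columnas);
//   CUDA_CHECK(cudaGetLastError());      // catches launch-configuration errors
//   CUDA_CHECK(cudaMemcpy(a, dev_c, filas * columnas * sizeof(float),
//                         cudaMemcpyDeviceToHost));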
e27de923f6729c0dfaea987ac8408ad1f9ef76e4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #define blockSize 512 #define real float __global__ void redukcja (int N, real* v, real* out) { size_t s = threadIdx.x + blockIdx.x * blockDim.x*2; int sID = threadIdx.x; size_t i; __shared__ real pom[blockSize]; pom[sID] = 0; if (s<N/2) pom[sID] = v[s] + v[s + blockDim.x]; __syncthreads(); for (i=blockDim.x/2; i>32; i>>=1){ if (sID<i){ pom[sID] += pom[sID + i]; } __syncthreads(); } if (sID < 32){ pom[sID] += pom[sID + 32]; __syncthreads(); pom[sID] += pom[sID + 16]; __syncthreads(); pom[sID] += pom[sID + 8]; __syncthreads(); pom[sID] += pom[sID + 4]; __syncthreads(); pom[sID] += pom[sID + 2]; __syncthreads(); pom[sID] += pom[sID + 1]; __syncthreads(); } if (sID==0) out[blockIdx.x] = pom[0]; } __global__ void redukcja2 (int N, real* v, real* out) { size_t s = threadIdx.x + blockIdx.x * blockDim.x; int sID = threadIdx.x; size_t i; __shared__ real pom[blockSize]; pom[sID] = 0; if (s<N) pom[sID] = v[s]; __syncthreads(); for (i=blockDim.x/2; i>0; i>>=1){ if (sID<i){ pom[sID] += pom[sID + i]; } __syncthreads(); } if (sID==0) out[blockIdx.x] = pom[0]; } __global__ void wypelnij (int N, real* v) { size_t s = threadIdx.x + blockIdx.x * blockDim.x; if (s<N) { v[s] = sin(s * 2. * M_PI / 10.); } } int main () { size_t N = blockSize * blockSize * blockSize; int blocks = (N + blockSize-1) / blockSize; float dt_ms; hipEvent_t event1, event2; hipEventCreate(&event1); hipEventCreate(&event2); real* v; hipMalloc( (void**) &v, N * sizeof(real) ); real* outV; hipMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) ); real* outVV; hipMalloc( (void**) &outVV, blockSize * sizeof(real) ); real out; int i; int M = 10; hipLaunchKernelGGL(( wypelnij) , dim3(blocks), dim3(blockSize), 0, 0, N, v); hipEventRecord(event1, 0); for (i=0; i<M; i++){ hipLaunchKernelGGL(( redukcja), dim3(blocks/2), dim3(blockSize), 0, 0, N, v, outV); hipLaunchKernelGGL(( redukcja), dim3(blockSize/2), dim3(blockSize), 0, 0, blockSize*blockSize, outV, outVV); hipLaunchKernelGGL(( redukcja2), dim3(1), dim3(blockSize), 0, 0, blockSize, outVV, v); } hipEventRecord(event2, 0); hipEventSynchronize(event1); hipEventSynchronize(event2); hipEventElapsedTime(&dt_ms, event1, event2); hipMemcpy (&out, v, 1 * sizeof(real), hipMemcpyDeviceToHost); printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out); return 0; }
e27de923f6729c0dfaea987ac8408ad1f9ef76e4.cu
#include <cuda.h> #include <stdio.h> #include <math.h> #define blockSize 512 #define real float __global__ void redukcja (int N, real* v, real* out) { size_t s = threadIdx.x + blockIdx.x * blockDim.x*2; int sID = threadIdx.x; size_t i; __shared__ real pom[blockSize]; pom[sID] = 0; if (s<N/2) pom[sID] = v[s] + v[s + blockDim.x]; __syncthreads(); for (i=blockDim.x/2; i>32; i>>=1){ if (sID<i){ pom[sID] += pom[sID + i]; } __syncthreads(); } if (sID < 32){ pom[sID] += pom[sID + 32]; __syncthreads(); pom[sID] += pom[sID + 16]; __syncthreads(); pom[sID] += pom[sID + 8]; __syncthreads(); pom[sID] += pom[sID + 4]; __syncthreads(); pom[sID] += pom[sID + 2]; __syncthreads(); pom[sID] += pom[sID + 1]; __syncthreads(); } if (sID==0) out[blockIdx.x] = pom[0]; } __global__ void redukcja2 (int N, real* v, real* out) { size_t s = threadIdx.x + blockIdx.x * blockDim.x; int sID = threadIdx.x; size_t i; __shared__ real pom[blockSize]; pom[sID] = 0; if (s<N) pom[sID] = v[s]; __syncthreads(); for (i=blockDim.x/2; i>0; i>>=1){ if (sID<i){ pom[sID] += pom[sID + i]; } __syncthreads(); } if (sID==0) out[blockIdx.x] = pom[0]; } __global__ void wypelnij (int N, real* v) { size_t s = threadIdx.x + blockIdx.x * blockDim.x; if (s<N) { v[s] = sin(s * 2. * M_PI / 10.); } } int main () { size_t N = blockSize * blockSize * blockSize; int blocks = (N + blockSize-1) / blockSize; float dt_ms; cudaEvent_t event1, event2; cudaEventCreate(&event1); cudaEventCreate(&event2); real* v; cudaMalloc( (void**) &v, N * sizeof(real) ); real* outV; cudaMalloc( (void**) &outV, blockSize * blockSize * sizeof(real) ); real* outVV; cudaMalloc( (void**) &outVV, blockSize * sizeof(real) ); real out; int i; int M = 10; wypelnij <<<blocks, blockSize>>> (N, v); cudaEventRecord(event1, 0); for (i=0; i<M; i++){ redukcja<<<blocks/2, blockSize>>> (N, v, outV); redukcja<<<blockSize/2, blockSize>>> (blockSize*blockSize, outV, outVV); redukcja2<<<1, blockSize>>> (blockSize, outVV, v); } cudaEventRecord(event2, 0); cudaEventSynchronize(event1); cudaEventSynchronize(event2); cudaEventElapsedTime(&dt_ms, event1, event2); cudaMemcpy (&out, v, 1 * sizeof(real), cudaMemcpyDeviceToHost); printf ("Czas redukcji: %f ms wynik; %f\n", dt_ms * 1./M, out); return 0; }
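Editor's note: in both versions of redukcja, the final if (sID < 32) block calls __syncthreads() from a branch that only the first warp executes, which the CUDA programming guide leaves undefined, and it reads the shared array without volatile, so the compiler may keep partial sums in registers. The classic warp-synchronous formulation of that tail avoids both issues; a sketch, assuming blockSize >= 64 as in this file (warpReduce is an illustrative helper, not part of the original code):

#define blockSize 512
#define real float

// Last-warp reduction: `volatile` forces every read/write to go through shared
// memory, so no block-wide barrier is needed inside a single warp (pre-Volta
// style; on Volta and later, add __syncwarp() between the steps instead).
__device__ void warpReduce(volatile real* pom, int sID)
{
    pom[sID] += pom[sID + 32];
    pom[sID] += pom[sID + 16];
    pom[sID] += pom[sID + 8];
    pom[sID] += pom[sID + 4];
    pom[sID] += pom[sID + 2];
    pom[sID] += pom[sID + 1];
}

// Inside redukcja the tail would then read:
//   if (sID < 32) warpReduce(pom, sID);
//   if (sID == 0) out[blockIdx.x] = pom[0];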
fbcaa52b3628a61259365d50d59fdd3300a10940.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This uses a lot of code from Caffe (http://caffe.berkeleyvision.org/); // sources are clearly marked. Below we reproduce the original license of // the Caffe software. /* Copyright (c) 2014, The Regents of the University of California (Regents) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #undef _GLIBCXX_ATOMIC_BUILTINS // (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/caffe_common.hpp) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: thread number configuration. // Use 1024 threads per block, which requires cuda sm_2x or above, // or fall back to attempt compatibility (best of luck to you). #if __CUDA_ARCH__ >= 200 const int CUDA_NUM_THREADS = 1024; #else const int CUDA_NUM_THREADS = 512; #endif // CUDA: number of blocks for threads. inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } // (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu) // Kernels for fast unfold + copy // CUDA kernel for the case of dilation __global__ void dilated_im2col_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { const int h_index = index / width_col; const int h_col = h_index % height_col; const int w_col = index % width_col; const int c_im = h_index / height_col; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col * stride_h - pad_h; const int w_offset = w_col * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i * dilation_h; int w_im = w_offset + j * dilation_w; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? 
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } } __global__ void im2col_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { const int h_index = index / width_col; const int h_col = h_index % height_col; const int w_col = index % width_col; const int c_im = h_index / height_col; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col * stride_h - pad_h; const int w_offset = w_col * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i ; int w_im = w_offset + j ; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } void im2col(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dil_kernel_h = (kernel_h - 1) * dilation_h + 1; int dil_kernel_w = (kernel_w - 1) * dilation_w + 1; int height_col = (height + 2 * pad_h - dil_kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - dil_kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; if(dilation_h != 1 || dilation_w != 1){ hipLaunchKernelGGL(( dilated_im2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); } else{ hipLaunchKernelGGL(( im2col_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); } } // CUDA kernel for the case of dilation __global__ void dilated_col2im_kernel(const int n, const float* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); int kernel_extent_w = (kernel_w - 1) * dilation_w + 1; int kernel_extent_h = (kernel_h - 1) * dilation_h + 1; // compute the start and end of the output const int w_col_start = (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_extent_h) ? 
0 : (h_im - kernel_extent_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // TODO: use LCM of stride and dilation to avoid unnecessary loops for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * stride_h); int w_k = (w_im - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } __global__ void col2im_kernel(const int n, const float* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); // compute the start and end of the output const int w_col_start = (w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // equivalent implementation, no dilation int offset = (c_im * kernel_h * kernel_w + h_im * kernel_w + w_im) * height_col * width_col; int coeff_h_col = (1 - stride_h * kernel_w * height_col) * width_col; int coeff_w_col = (1 - stride_w * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } data_im[index] = val; } } void col2im(const float* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_im) { int dil_patch_h = (patch_h - 1) * dilation_h + 1; int dil_patch_w = (patch_w - 1) * dilation_w + 1; int height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1; int width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
if(dilation_h != 1 || dilation_w != 1){ hipLaunchKernelGGL(( dilated_col2im_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, height, width, channels, patch_h, patch_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); } else{ hipLaunchKernelGGL(( col2im_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, height, width, channels, patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); } } // Theano op code // Authors: Arjun Jain, Frederic Bastien, Jan Schluter // Reference code: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // and https://github.com/torch/cunn/blob/master/SpatialConvolutionMM.cu CudaNdarray* corrMM(CudaNdarray *const bottom, CudaNdarray *const weight, CudaNdarray *const top, const int direction, const int dH = 1, const int dW = 1, const int dilH = 1, const int dilW = 1, const int padH = 0, const int padW = 0) { if (bottom->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires bottom of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(bottom)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires bottom to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(bottom)[0], CudaNdarray_HOST_STRIDES(bottom)[1], CudaNdarray_HOST_STRIDES(bottom)[2], CudaNdarray_HOST_STRIDES(bottom)[3]); return NULL; } if (weight->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires weight of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(weight)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires weight to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(weight)[0], CudaNdarray_HOST_STRIDES(weight)[1], CudaNdarray_HOST_STRIDES(weight)[2], CudaNdarray_HOST_STRIDES(weight)[3]); return NULL; } if (top->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires top of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(top)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires top to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(top)[0], CudaNdarray_HOST_STRIDES(top)[1], CudaNdarray_HOST_STRIDES(top)[2], CudaNdarray_HOST_STRIDES(top)[3]); return NULL; } // Extract some shape information for later and check shape consistency // bottom: (batchSize, nChannels, bottomHeight, bottomWidth) const int batchSize = CudaNdarray_HOST_DIMS(bottom)[0]; const int nChannels = CudaNdarray_HOST_DIMS(bottom)[1]; const int bottomHeight = CudaNdarray_HOST_DIMS(bottom)[2]; const int bottomWidth = CudaNdarray_HOST_DIMS(bottom)[3]; // weights: (nFilters, nChannels, rows, columns) const int nFilters = CudaNdarray_HOST_DIMS(weight)[0]; const int kH = CudaNdarray_HOST_DIMS(weight)[2]; const int kW = CudaNdarray_HOST_DIMS(weight)[3]; if (nChannels != CudaNdarray_HOST_DIMS(weight)[1]) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM images and kernel must have the same stack size\n"); return NULL; } // implicit dilated filter const int dil_kH = (kH - 1) * dilH + 1; const int dil_kW = (kW - 1) * dilW + 1; // top: (batchSize, nFilters, topHeight, topWidth) const int topHeightNoDH = (bottomHeight + 2*padH - dil_kH); const int topWidthNoDW = (bottomWidth + 2*padW - dil_kW); // the above values might be negative so we need to use Python-like // flooring integer division to be compatible with get_conv_output. // note: this macro implements Python's // for negative x only #define _CONV_FLOORDIV_X(x,y) ((x < 0) ? 
(- ((-x) / y) - (((-x) % y) == 0 ? 0 : 1)) : (x / y)) const int topHeight = _CONV_FLOORDIV_X(topHeightNoDH, dH) + 1; const int topWidth = _CONV_FLOORDIV_X(topWidthNoDW, dW) + 1; #undef _CONV_FLOORDIV if (batchSize != CudaNdarray_HOST_DIMS(top)[0] || nFilters != CudaNdarray_HOST_DIMS(top)[1] || topHeight != CudaNdarray_HOST_DIMS(top)[2] || topWidth != CudaNdarray_HOST_DIMS(top)[3]) { PyErr_Format(PyExc_ValueError, "GpuCorrMM shape inconsistency:\n" " bottom shape: %d %d %d %d\n" " weight shape: %d %d %d %d\n" " top shape: %d %d %d %d (expected %d %d %d %d)\n", batchSize, nChannels, bottomHeight, bottomWidth, nFilters, nChannels, kH, kW, CudaNdarray_HOST_DIMS(top)[0], CudaNdarray_HOST_DIMS(top)[1], CudaNdarray_HOST_DIMS(top)[2], CudaNdarray_HOST_DIMS(top)[3], batchSize, nFilters, topHeight, topWidth); return NULL; } // Create temporary columns int col_dim[2]; col_dim[0] = nChannels * kW * kH; col_dim[1] = topHeight * topWidth; CudaNdarray* col = (CudaNdarray*)CudaNdarray_NewDims(2, col_dim); if (NULL == col) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM failed to allocate working memory of %d x %d\n", col_dim[0], col_dim[1]); return NULL; } // Define some useful variables const int bottom_stride = CudaNdarray_HOST_STRIDES(bottom)[0]; const int top_stride = CudaNdarray_HOST_STRIDES(top)[0]; const int K_ = col_dim[0]; const int N_ = col_dim[1]; const int M_ = nFilters; const float one = 1.0f; const float zero = 0.0f; CudaNdarray *output; if (direction == 0) { // forward pass output = top; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { hipError_t err = hipMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM could not fill the output with zeros: %s", hipGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // valid correlation: im2col, then gemm // Iterate over batch for (int n = 0; n < batchSize; n++) { // First, im2col im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, col->devdata); hipError_t err = hipGetLastError(); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in im2col: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", hipGetErrorString(err)); Py_DECREF(col); return NULL; } // Second, gemm hipblasStatus_t status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N_, M_, K_, &one, col->devdata, N_, weight->devdata, K_, &zero, top->devdata + n * top_stride, N_); if (status != HIPBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // Note that this is for grouped convolution; we can ignore groups here, // but the group-related offsets help explain what M_, N_ and K_ are int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + 
weight_offset * g, col_data + col_offset * g, (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) hipblasSgemm(HIPBLAS_OP_N, HIPBLAS_OP_N, N_, M_, K_, 1., col_data + col_offset * g, N_, weight + weight_offset * g, K_, 0., top_data + (*top)[i]->offset(n) + top_offset * g, N_); } } */ } else if (direction == 1) { // backprop wrt. weights output = weight; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { hipError_t err = hipMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM grad wrt. weights could not fill the output with zeros: %s", hipGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // valid convolution: im2col, then gemm // Iterate over batch for (int n = 0; n < batchSize; n++) { // First, im2col im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, col->devdata); hipError_t err = hipGetLastError(); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in im2col: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", hipGetErrorString(err)); Py_DECREF(col); return NULL; } // Second, gemm // Note that we accumulate into weight. We do so by setting beta = 0 // for the first iteration and beta = 1 for subsequent ones. (This // is faster than setting weight to all zeros before the loop.) hipblasStatus_t status = hipblasSgemm(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, K_, M_, N_, &one, col->devdata, N_, top->devdata + n * top_stride, N_, (n == 0) ? &zero : &one, weight->devdata, K_); if (status != HIPBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // Note that this is for grouped convolution; we can ignore groups for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype)1., weight_diff + weight_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) hipblasSgemm(HIPBLAS_OP_T, HIPBLAS_OP_N, K_, M_, N_, 1.0, col_data + col_offset * g, N_, top_diff + top[i]->offset(n) + top_offset * g, N_, 1.0, weight_diff + weight_offset * g, K_); } } */ } else if (direction == 2) { // backprop wrt. inputs output = bottom; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { hipError_t err = hipMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM grad wrt. 
inputs could not fill the output with zeros: %s", hipGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // full convolution: gemm, then col2im // Iterate over batch for (int n = 0; n < batchSize; n++) { // gemm into columns hipblasStatus_t status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, N_, K_, M_, &one, top->devdata + n * top_stride, N_, weight->devdata, K_, &zero, col->devdata, N_); if (status != HIPBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } // col2im back to the data col2im(col->devdata, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, bottom->devdata + n * bottom_stride); hipError_t err = hipGetLastError(); if (err != hipSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in col2im: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", hipGetErrorString(err)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu for (int n = 0; n < num_; ++n) { // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., col_diff + col_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) hipblasSgemm(HIPBLAS_OP_N, HIPBLAS_OP_T, N_, K_, M_, 1., top_diff + top[i]->offset(n) + top_offset * g, N_, weight + weight_offset * g, K_, 0., col_diff + col_offset * g, N_); } // col2im back to the data col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n)); } } */ } // Free temporary columns Py_DECREF(col); // Note that we don't change the refcount of the output matrix here. Output // (re)allocation and refcounting is done in BaseGpuCorrMM.c_code_helper(); // in here output is just aliased to one of bottom, weights, or top. return output; }
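Editor's note: the im2col/col2im wrappers above launch channels * height_col * width_col logical work items and rely on the Caffe-style CUDA_KERNEL_LOOP grid-stride macro together with GET_BLOCKS, so every index is processed even when the launched grid is capped below the element count. A minimal self-contained illustration of that launch convention, reusing the same macros (scale_kernel is a hypothetical example kernel, not part of the original file):

#include <cuda_runtime.h>

#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;   // the original falls back to 512 below sm_20

inline int GET_BLOCKS(const int N) {
    return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}

// Hypothetical kernel: scales n floats in place, one grid-stride loop per thread.
__global__ void scale_kernel(const int n, float* data, const float alpha)
{
    CUDA_KERNEL_LOOP(index, n) {
        data[index] *= alpha;
    }
}

// Launched with the same sizing used for the im2col/col2im kernels:
//   scale_kernel<<<GET_BLOCKS(num_elems), CUDA_NUM_THREADS>>>(num_elems, d_data, 0.5f);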
fbcaa52b3628a61259365d50d59fdd3300a10940.cu
// This uses a lot of code from Caffe (http://caffe.berkeleyvision.org/); // sources are clearly marked. Below we reproduce the original license of // the Caffe software. /* Copyright (c) 2014, The Regents of the University of California (Regents) All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #undef _GLIBCXX_ATOMIC_BUILTINS // (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/caffe_common.hpp) // CUDA: grid stride looping #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) // CUDA: thread number configuration. // Use 1024 threads per block, which requires cuda sm_2x or above, // or fall back to attempt compatibility (best of luck to you). #if __CUDA_ARCH__ >= 200 const int CUDA_NUM_THREADS = 1024; #else const int CUDA_NUM_THREADS = 512; #endif // CUDA: number of blocks for threads. inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } // (borrowed from Caffe: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu) // Kernels for fast unfold + copy // CUDA kernel for the case of dilation __global__ void dilated_im2col_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { const int h_index = index / width_col; const int h_col = h_index % height_col; const int w_col = index % width_col; const int c_im = h_index / height_col; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col * stride_h - pad_h; const int w_offset = w_col * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i * dilation_h; int w_im = w_offset + j * dilation_w; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? 
data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += height_col * width_col; } } } } __global__ void im2col_kernel(const int n, const float* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_col) { CUDA_KERNEL_LOOP(index, n) { const int h_index = index / width_col; const int h_col = h_index % height_col; const int w_col = index % width_col; const int c_im = h_index / height_col; const int c_col = c_im * kernel_h * kernel_w; const int h_offset = h_col * stride_h - pad_h; const int w_offset = w_col * stride_w - pad_w; float* data_col_ptr = data_col; data_col_ptr += (c_col * height_col + h_col) * width_col + w_col; const float* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i ; int w_im = w_offset + j ; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * width + j] : 0; data_col_ptr += height_col * width_col; } } } } void im2col(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. int dil_kernel_h = (kernel_h - 1) * dilation_h + 1; int dil_kernel_w = (kernel_w - 1) * dilation_w + 1; int height_col = (height + 2 * pad_h - dil_kernel_h) / stride_h + 1; int width_col = (width + 2 * pad_w - dil_kernel_w) / stride_w + 1; int num_kernels = channels * height_col * width_col; if(dilation_h != 1 || dilation_w != 1){ dilated_im2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); } else{ im2col_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_col); } } // CUDA kernel for the case of dilation __global__ void dilated_col2im_kernel(const int n, const float* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); int kernel_extent_w = (kernel_w - 1) * dilation_w + 1; int kernel_extent_h = (kernel_h - 1) * dilation_h + 1; // compute the start and end of the output const int w_col_start = (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_extent_h) ? 
0 : (h_im - kernel_extent_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // TODO: use LCM of stride and dilation to avoid unnecessary loops for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * stride_h); int w_k = (w_im - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } __global__ void col2im_kernel(const int n, const float* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int height_col, const int width_col, float* data_im) { CUDA_KERNEL_LOOP(index, n) { float val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); // compute the start and end of the output const int w_col_start = (w_im < kernel_w) ? 0 : (w_im - kernel_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_h) ? 0 : (h_im - kernel_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // equivalent implementation, no dilation int offset = (c_im * kernel_h * kernel_w + h_im * kernel_w + w_im) * height_col * width_col; int coeff_h_col = (1 - stride_h * kernel_w * height_col) * width_col; int coeff_w_col = (1 - stride_w * height_col * width_col); for (int h_col = h_col_start; h_col < h_col_end; ++h_col) { for (int w_col = w_col_start; w_col < w_col_end; ++w_col) { val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col]; } } data_im[index] = val; } } void col2im(const float* data_col, const int channels, const int height, const int width, const int patch_h, const int patch_w, const int dilation_h, const int dilation_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, float* data_im) { int dil_patch_h = (patch_h - 1) * dilation_h + 1; int dil_patch_w = (patch_w - 1) * dilation_w + 1; int height_col = (height + 2 * pad_h - dil_patch_h) / stride_h + 1; int width_col = (width + 2 * pad_w - dil_patch_w) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. 
if(dilation_h != 1 || dilation_w != 1){ dilated_col2im_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col, height, width, channels, patch_h, patch_w, dilation_h, dilation_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); } else{ col2im_kernel<<<GET_BLOCKS(num_kernels), CUDA_NUM_THREADS>>>( num_kernels, data_col, height, width, channels, patch_h, patch_w, pad_h, pad_w, stride_h, stride_w, height_col, width_col, data_im); } } // Theano op code // Authors: Arjun Jain, Frederic Bastien, Jan Schluter // Reference code: https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // and https://github.com/torch/cunn/blob/master/SpatialConvolutionMM.cu CudaNdarray* corrMM(CudaNdarray *const bottom, CudaNdarray *const weight, CudaNdarray *const top, const int direction, const int dH = 1, const int dW = 1, const int dilH = 1, const int dilW = 1, const int padH = 0, const int padW = 0) { if (bottom->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires bottom of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(bottom)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires bottom to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(bottom)[0], CudaNdarray_HOST_STRIDES(bottom)[1], CudaNdarray_HOST_STRIDES(bottom)[2], CudaNdarray_HOST_STRIDES(bottom)[3]); return NULL; } if (weight->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires weight of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(weight)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires weight to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(weight)[0], CudaNdarray_HOST_STRIDES(weight)[1], CudaNdarray_HOST_STRIDES(weight)[2], CudaNdarray_HOST_STRIDES(weight)[3]); return NULL; } if (top->nd != 4) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM requires top of 4D"); return NULL; } if (!CudaNdarray_is_c_contiguous(top)) { PyErr_Format(PyExc_ValueError, "GpuCorrMM requires top to be C-contiguous, " "but strides are: %d %d %d %d\n", CudaNdarray_HOST_STRIDES(top)[0], CudaNdarray_HOST_STRIDES(top)[1], CudaNdarray_HOST_STRIDES(top)[2], CudaNdarray_HOST_STRIDES(top)[3]); return NULL; } // Extract some shape information for later and check shape consistency // bottom: (batchSize, nChannels, bottomHeight, bottomWidth) const int batchSize = CudaNdarray_HOST_DIMS(bottom)[0]; const int nChannels = CudaNdarray_HOST_DIMS(bottom)[1]; const int bottomHeight = CudaNdarray_HOST_DIMS(bottom)[2]; const int bottomWidth = CudaNdarray_HOST_DIMS(bottom)[3]; // weights: (nFilters, nChannels, rows, columns) const int nFilters = CudaNdarray_HOST_DIMS(weight)[0]; const int kH = CudaNdarray_HOST_DIMS(weight)[2]; const int kW = CudaNdarray_HOST_DIMS(weight)[3]; if (nChannels != CudaNdarray_HOST_DIMS(weight)[1]) { PyErr_SetString(PyExc_ValueError, "GpuCorrMM images and kernel must have the same stack size\n"); return NULL; } // implicit dilated filter const int dil_kH = (kH - 1) * dilH + 1; const int dil_kW = (kW - 1) * dilW + 1; // top: (batchSize, nFilters, topHeight, topWidth) const int topHeightNoDH = (bottomHeight + 2*padH - dil_kH); const int topWidthNoDW = (bottomWidth + 2*padW - dil_kW); // the above values might be negative so we need to use Python-like // flooring integer division to be compatible with get_conv_output. // note: this macro implements Python's // for negative x only #define _CONV_FLOORDIV_X(x,y) ((x < 0) ? (- ((-x) / y) - (((-x) % y) == 0 ? 
0 : 1)) : (x / y)) const int topHeight = _CONV_FLOORDIV_X(topHeightNoDH, dH) + 1; const int topWidth = _CONV_FLOORDIV_X(topWidthNoDW, dW) + 1; #undef _CONV_FLOORDIV if (batchSize != CudaNdarray_HOST_DIMS(top)[0] || nFilters != CudaNdarray_HOST_DIMS(top)[1] || topHeight != CudaNdarray_HOST_DIMS(top)[2] || topWidth != CudaNdarray_HOST_DIMS(top)[3]) { PyErr_Format(PyExc_ValueError, "GpuCorrMM shape inconsistency:\n" " bottom shape: %d %d %d %d\n" " weight shape: %d %d %d %d\n" " top shape: %d %d %d %d (expected %d %d %d %d)\n", batchSize, nChannels, bottomHeight, bottomWidth, nFilters, nChannels, kH, kW, CudaNdarray_HOST_DIMS(top)[0], CudaNdarray_HOST_DIMS(top)[1], CudaNdarray_HOST_DIMS(top)[2], CudaNdarray_HOST_DIMS(top)[3], batchSize, nFilters, topHeight, topWidth); return NULL; } // Create temporary columns int col_dim[2]; col_dim[0] = nChannels * kW * kH; col_dim[1] = topHeight * topWidth; CudaNdarray* col = (CudaNdarray*)CudaNdarray_NewDims(2, col_dim); if (NULL == col) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM failed to allocate working memory of %d x %d\n", col_dim[0], col_dim[1]); return NULL; } // Define some useful variables const int bottom_stride = CudaNdarray_HOST_STRIDES(bottom)[0]; const int top_stride = CudaNdarray_HOST_STRIDES(top)[0]; const int K_ = col_dim[0]; const int N_ = col_dim[1]; const int M_ = nFilters; const float one = 1.0f; const float zero = 0.0f; CudaNdarray *output; if (direction == 0) { // forward pass output = top; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { cudaError_t err = cudaMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM could not fill the output with zeros: %s", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // valid correlation: im2col, then gemm // Iterate over batch for (int n = 0; n < batchSize; n++) { // First, im2col im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, col->devdata); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in im2col: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } // Second, gemm cublasStatus_t status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N_, M_, K_, &one, col->devdata, N_, weight->devdata, K_, &zero, top->devdata + n * top_stride, N_); if (status != CUBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // Note that this is for grouped convolution; we can ignore groups here, // but the group-related offsets help explain what M_, N_ and K_ are int weight_offset = M_ * K_; int col_offset = K_ * N_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col im2col_gpu(bottom_data + bottom[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // Second, innerproduct with groups for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + weight_offset * g, col_data + col_offset 
* g, (Dtype)0., top_data + (*top)[i]->offset(n) + top_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) cublasSgemm(CUBLAS_OP_N, CUBLAS_OP_N, N_, M_, K_, 1., col_data + col_offset * g, N_, weight + weight_offset * g, K_, 0., top_data + (*top)[i]->offset(n) + top_offset * g, N_); } } */ } else if (direction == 1) { // backprop wrt. weights output = weight; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { cudaError_t err = cudaMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM grad wrt. weights could not fill the output with zeros: %s", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // valid convolution: im2col, then gemm // Iterate over batch for (int n = 0; n < batchSize; n++) { // First, im2col im2col(bottom->devdata + n * bottom_stride, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, col->devdata); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in im2col: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } // Second, gemm // Note that we accumulate into weight. We do so by setting beta = 0 // for the first iteration and beta = 1 for subsequent ones. (This // is faster than setting weight to all zeros before the loop.) cublasStatus_t status = cublasSgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, K_, M_, N_, &one, col->devdata, N_, top->devdata + n * top_stride, N_, (n == 0) ? &zero : &one, weight->devdata, K_); if (status != CUBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu // Note that this is for grouped convolution; we can ignore groups for (int n = 0; n < num_; ++n) { // Since we saved memory in the forward pass by not storing all col // data, we will need to recompute them. im2col_gpu(bottom_data + (*bottom)[i]->offset(n), channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[i]->offset(n) + top_offset * g, col_data + col_offset * g, (Dtype)1., weight_diff + weight_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) cublasSgemm(CUBLAS_OP_T, CUBLAS_OP_N, K_, M_, N_, 1.0, col_data + col_offset * g, N_, top_diff + top[i]->offset(n) + top_offset * g, N_, 1.0, weight_diff + weight_offset * g, K_); } } */ } else if (direction == 2) { // backprop wrt. inputs output = bottom; if (batchSize == 0 || nChannels == 0 || nFilters == 0) { cudaError_t err = cudaMemset(output->devdata, 0, CudaNdarray_SIZE(output) * sizeof(real)); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM grad wrt. 
inputs could not fill the output with zeros: %s", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } Py_DECREF(col); return output; } // full convolution: gemm, then col2im // Iterate over batch for (int n = 0; n < batchSize; n++) { // gemm into columns cublasStatus_t status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, N_, K_, M_, &one, top->devdata + n * top_stride, N_, weight->devdata, K_, &zero, col->devdata, N_); if (status != CUBLAS_STATUS_SUCCESS) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUBLAS error: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cublasGetErrorString(status)); Py_DECREF(col); return NULL; } // col2im back to the data col2im(col->devdata, nChannels, bottomHeight, bottomWidth, kH, kW, dilH, dilW, padH, padW, dH, dW, bottom->devdata + n * bottom_stride); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { PyErr_Format(PyExc_RuntimeError, "GpuCorrMM encountered a CUDA error in col2im: %s\n" "This could be a known bug in CUDA, please see the " "GpuCorrMM() documentation.\n", cudaGetErrorString(err)); Py_DECREF(col); return NULL; } } /* // Original caffe code for comparison // https://github.com/BVLC/caffe/blob/master/src/caffe/layers/conv_layer.cu for (int n = 0; n < num_; ++n) { // gradient w.r.t. bottom data, if necessary if (propagate_down[i]) { for (int g = 0; g < group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + weight_offset * g, top_diff + top[i]->offset(n) + top_offset * g, (Dtype)0., col_diff + col_offset * g); == (see https://github.com/BVLC/caffe/blob/master/src/caffe/util/math_functions.cu#L16) cublasSgemm(CUBLAS_OP_N, CUBLAS_OP_T, N_, K_, M_, 1., top_diff + top[i]->offset(n) + top_offset * g, N_, weight + weight_offset * g, K_, 0., col_diff + col_offset * g, N_); } // col2im back to the data col2im_gpu(col_diff, channels_, height_, width_, kernel_h_, kernel_w_, pad_h_, pad_w_, stride_h_, stride_w_, bottom_diff + (*bottom)[i]->offset(n)); } } */ } // Free temporary columns Py_DECREF(col); // Note that we don't change the refcount of the output matrix here. Output // (re)allocation and refcounting is done in BaseGpuCorrMM.c_code_helper(); // in here output is just aliased to one of bottom, weights, or top. return output; }
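Editor's note: corrMM derives the output spatial size from the implicitly dilated kernel extent and uses the _CONV_FLOORDIV_X macro so that a negative numerator rounds toward minus infinity, matching Python's // operator rather than C's truncating division. A small host-only sketch of that computation (helper names are illustrative, not from the original file):

#include <stdio.h>

// Python-style floor division for possibly negative numerators,
// mirroring _CONV_FLOORDIV_X above.
static int conv_floordiv(int x, int y)
{
    return (x < 0) ? (-((-x) / y) - (((-x) % y) == 0 ? 0 : 1)) : (x / y);
}

// Output length along one spatial dimension, as in corrMM's topHeight/topWidth.
static int conv_output_dim(int bottom, int pad, int kernel, int dilation, int stride)
{
    int dil_kernel = (kernel - 1) * dilation + 1;   // implicit dilated filter extent
    return conv_floordiv(bottom + 2 * pad - dil_kernel, stride) + 1;
}

int main(void)
{
    // 7-wide input, 3-wide kernel, dilation 2, stride 2, no padding -> 2 outputs
    printf("%d\n", conv_output_dim(7, 0, 3, 2, 2));
    // Where the floor matters: -3/2 truncates to -1 in C, but floors to -2 here.
    printf("%d vs %d\n", -3 / 2, conv_floordiv(-3, 2));
    return 0;
}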
be0ca1e33af09180ae9d338346f8072837bd4831.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <hip/driver_types.h> #include <host_defines.h> #include <opencv2/core/core.hpp> #include <opencv2/core/gpumat.hpp> #include <opencv2/core/mat.hpp> #include <opencv2/core/operations.hpp> #include <opencv2/core/types_c.h> #include <opencv2/gpu/gpu.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui_c.h> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/imgproc/types_c.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <hip/hip_vector_types.h> #include <fstream> #include <iosfwd> #include <ostream> #include <sstream> #include <string> #include <vector> #include <iostream> #include "csvparser.h" using namespace std; using namespace cv; using namespace cv::gpu; struct Arguments { string project; string input; string output; int padding; int frames; string extension; int width; int height; int area_min; int area_max; int search_win_size; int blur_radius; int threshold_win_size; float threshold_ratio; string log; bool verbose; Arguments() : input("data/"), output("output.txt"), padding(7), frames(1000), extension( ".jpg"), width(640), height(480), area_min(200), area_max( 400), search_win_size(100), blur_radius(3), threshold_win_size( 25), threshold_ratio(0.9), log("wormSeg.log"), verbose(true) { } } cla; int findCentroidFrom1Image(cv::Mat, int*, int*, int*); template<typename T> string NumberToString(T pNumber) { ostringstream oOStrStream; oOStrStream << pNumber; return oOStrStream.str(); } string intToFileName(string fileNameFormat, int fileNumber) { string temp = NumberToString(fileNumber); return fileNameFormat.replace(fileNameFormat.size() - temp.size(), temp.size(), temp); } void func(const float*, float*, size_t, const size_t, int, int, int&, int&); int centroidRow = 0; int centroidCol = 0; void callKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int *pX, int *pY) { float* p = (float*) src.data; float* p2 = (float*) dst.data; func(p, p2, src.step, dst.step, src.cols, src.rows, centroidRow, centroidCol); // *pX = centroidRow; // *pY = centroidCol; // cout << "print row=" << *pX << endl; // cout << "print col=" << *pY << endl; } int cudaFindCentroid(cv::Mat src, int *pX, int *pY, int *pArea) { //GPU Mat... Copy from CPU memory to GPU memory... cv::gpu::GpuMat gpu_src(src); cv::gpu::GpuMat matAfterBlur; //Filters on GPU... cv::gpu::blur(gpu_src, matAfterBlur, Size(cla.blur_radius, cla.blur_radius)); cv::gpu::GpuMat matAfterThreshold; //Convert into Binary image on GPU... cv::gpu::threshold(matAfterBlur, matAfterThreshold, int(cla.threshold_ratio * 255), 255, THRESH_BINARY_INV); cv::gpu::GpuMat floatMatForKernel; matAfterThreshold.convertTo(floatMatForKernel, CV_32FC1); // cout << "i'm here" << endl; callKernel(floatMatForKernel, gpu_src, pX, pY); // //Copy from GPU memory to CPU memory... // if (*pX) { //// *pX = bRect.x + (bRect.width / 2); //// *pY = bRect.y + (bRect.height / 2); // *pArea = 10; // } else { // *pX = -1; // *pY = -1; // *pArea = -1; // } // // return 0; } int wormSegmenter() { fstream outputFile; outputFile.open(cla.output.c_str(), ios::out); int x = -1, y = -1, area = -1; int adjustX = 0, adjustY = 0; for (int fileNumber = 0; fileNumber < cla.frames; fileNumber++) { string fileName = cla.input + intToFileName("0000000", fileNumber) + cla.extension; cv::Mat src = cv::imread(fileName, CV_LOAD_IMAGE_GRAYSCALE); if (!src.data) { // cout << endl << "Exited." 
<< endl; exit(1); } if ((x == -1) && (y == -1)) { findCentroidFrom1Image(src, &x, &y, &area); src = cv::imread(fileName, CV_LOAD_IMAGE_GRAYSCALE); adjustX = x - (cla.search_win_size / 2); adjustY = y - (cla.search_win_size / 2); } else { src = src( cv::Rect(x - (cla.search_win_size / 2), y - (cla.search_win_size / 2), cla.search_win_size, cla.search_win_size)); cudaFindCentroid(src, &x, &y, &area); if ((x > 0) && (y > 0)) { //std::cout << "writing file=" << fileNumber << "x=" << x << "y=" << y << endl; // x += adjustX; // y += adjustY; // adjustX = x - (cla.search_win_size / 2); // adjustY = y - (cla.search_win_size / 2); x = 153; y = 251; } } // cout << "writing file=" << fileNumber << "x=" << x << "y=" << y << endl; outputFile << fileNumber << ", " << x << ", " << y << ", " << area << endl; } outputFile.close(); return 0; } int findCentroidFrom1Image(cv::Mat src, int *pX, int *pY, int *pArea) { // Smoothing the image. blur(src, src, Size(cla.blur_radius, cla.blur_radius)); //Blur radius 3 in original java worm segmenter. // Convert the image into binary image. threshold(src, src, int(cla.threshold_ratio * 255), 255, THRESH_BINARY_INV); // Vector for storing contour vector<vector<Point> > contours; vector<Vec4i> hierarchy; // Find contours in the image. findContours(src, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); if (contours.size() > 0) { int largest_contour_index = 0; int largest_area = 0; // Iterate through each contour. for (int i = 0; i < contours.size(); i++) { // Find the area of contour double a = contourArea(contours[i], false); if (a > largest_area) { largest_area = a; // Store the index of largest contour largest_contour_index = i; } } Rect bRect = boundingRect(contours[largest_contour_index]); *pX = bRect.x + (bRect.width / 2); *pY = bRect.y + (bRect.height / 2); *pArea = largest_area; } else { *pX = -1; *pY = -1; *pArea = -1; } return 0; } int main(int argc, char **argv) { int i = 0; // file, delimiter, first_line_is_header? 
CsvParser *csvparser = CsvParser_new("example_file.csv", ",", 0); CsvRow *row; std::vector<std::vector<float> > vec; while ((row = CsvParser_getRow(csvparser))) { std::vector<float> eachLine; //printf("==NEW LINE==\n"); const char **rowFields = CsvParser_getFields(row); for (i = 0; i < CsvParser_getNumFields(row); i++) { eachLine.push_back(atof(rowFields[i])); //printf("FIELD: %f\n", eachLine[i]); } vec.push_back(eachLine); // printf("\n"); CsvParser_destroy_row(row); } // printf("test=%f\n", vec[0][1]); // printf("test=%f\n", vec[1][0]); // printf("test=%f\n", vec[2][0]); // printf("test=%f\n", vec[3][0]); CsvParser_destroy(csvparser); int a = wormSegmenter(); return 0; } //#define arraySIZE 240 __device__ int edgesValues[100][100]; //__device__ int edgesValues[480][640]; __shared__ int counter; __constant__ const int maxContourPoints = 300; __global__ void funcKernel(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int* inputArray_d, int* outputArray_d) { int rowInd = blockIdx.y * blockDim.y + threadIdx.y; int colInd = blockIdx.x * blockDim.x + threadIdx.x; if (rowInd >= rows || colInd >= cols) return; const float* rowsrcptr = (const float *) (((char *) srcptr) + rowInd * srcstep); float val = rowsrcptr[colInd]; if ((rowInd > 2 && rowInd < (rows - 2)) && (colInd > 2 && colInd < (cols - 2))) { if (val == 255) { const float* rowsrcptrNxt = (const float *) (((char *) srcptr) + (rowInd + 1) * srcstep); const float* rowsrcptrPrev = (const float *) (((char *) srcptr) + (rowInd - 1) * srcstep); if (rowsrcptrPrev[colInd - 1] == 0 || rowsrcptrPrev[colInd] == 0 || rowsrcptrPrev[colInd + 1] == 0 || rowsrcptr[colInd - 1] == 0 || rowsrcptr[colInd - 1] == 0 || rowsrcptrNxt[colInd - 1] == 0 || rowsrcptrNxt[colInd] == 0 || rowsrcptrNxt[colInd + 1] == 0) { edgesValues[rowInd][colInd] = 1; } else { edgesValues[rowInd][colInd] = 0; } } } } __global__ void funcKernel2(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int* inputArray_d, int* outputArray_d, int *a, int *b, int *c) { int rowInd = blockIdx.y * blockDim.y + threadIdx.y; int colInd = blockIdx.x * blockDim.x + threadIdx.x; if (rowInd >= rows || colInd >= cols) return; counter = 0; int maxRow = 0; int minRow = rows; int minCol = cols; int maxCol = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (edgesValues[i][j] == 1 && counter < maxContourPoints) { if (i < minRow) { minRow = i; } if (i > maxRow) { maxRow = i; } if (j < minCol) { minCol = j; } if (j > maxCol) { maxCol = j; } counter++; } } } int centroidRow = (minRow + maxRow) / 2; int centroidCol = (minCol + maxCol) / 2; // printf("%d,%d", centroidRow, centroidCol); *a = centroidRow; *b = centroidCol; *c = *a + *b; } int divUp(int a, unsigned int b) { return (a + b - 1) / b; } void func(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int& centroidRow, int& centroidCol) { dim3 blDim(32, 8); dim3 grDim(divUp(cols, blDim.x), divUp(rows, blDim.y)); int inputArray_h[rows * cols]; int outputArray_h[rows * cols]; int* cRowNumber; int* cColNumber; for (int j = 0; j < rows * cols; j++) { inputArray_h[j] = 0; } int int_BYTES = sizeof(int); //allocate GPU memory hipMalloc((void**) &cRowNumber, int_BYTES); hipMalloc((void**) &cColNumber, int_BYTES); hipMemcpy(cRowNumber, inputArray_h, int_BYTES, hipMemcpyHostToDevice); hipLaunchKernelGGL(( funcKernel), dim3(grDim), dim3(blDim), 0, 0, srcptr, dstptr, srcstep, dststep, cols, rows, cRowNumber, 
            cColNumber);

    // hipDeviceSynchronize();
    int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
    int size = sizeof(int);     // we need space for an integer

    // allocate device copies of a, b, c
    hipMalloc((void**) &dev_a, size);
    hipMalloc((void**) &dev_b, size);
    hipMalloc((void**) &dev_c, size);

    // copy inputs to device
    hipLaunchKernelGGL(( funcKernel2), dim3(1), dim3(1), 0, 0, srcptr, dstptr, srcstep, dststep, cols, rows,
            cRowNumber, cColNumber, dev_a, dev_b, dev_c);
}
be0ca1e33af09180ae9d338346f8072837bd4831.cu
#include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <driver_types.h> #include <host_defines.h> #include <opencv2/core/core.hpp> #include <opencv2/core/gpumat.hpp> #include <opencv2/core/mat.hpp> #include <opencv2/core/operations.hpp> #include <opencv2/core/types_c.h> #include <opencv2/gpu/gpu.hpp> #include <opencv2/highgui/highgui.hpp> #include <opencv2/highgui/highgui_c.h> #include <opencv2/imgproc/imgproc.hpp> #include <opencv2/imgproc/types_c.h> #include <stddef.h> #include <stdio.h> #include <stdlib.h> #include <vector_types.h> #include <fstream> #include <iosfwd> #include <ostream> #include <sstream> #include <string> #include <vector> #include <iostream> #include "csvparser.h" using namespace std; using namespace cv; using namespace cv::gpu; struct Arguments { string project; string input; string output; int padding; int frames; string extension; int width; int height; int area_min; int area_max; int search_win_size; int blur_radius; int threshold_win_size; float threshold_ratio; string log; bool verbose; Arguments() : input("data/"), output("output.txt"), padding(7), frames(1000), extension( ".jpg"), width(640), height(480), area_min(200), area_max( 400), search_win_size(100), blur_radius(3), threshold_win_size( 25), threshold_ratio(0.9), log("wormSeg.log"), verbose(true) { } } cla; int findCentroidFrom1Image(cv::Mat, int*, int*, int*); template<typename T> string NumberToString(T pNumber) { ostringstream oOStrStream; oOStrStream << pNumber; return oOStrStream.str(); } string intToFileName(string fileNameFormat, int fileNumber) { string temp = NumberToString(fileNumber); return fileNameFormat.replace(fileNameFormat.size() - temp.size(), temp.size(), temp); } void func(const float*, float*, size_t, const size_t, int, int, int&, int&); int centroidRow = 0; int centroidCol = 0; void callKernel(const cv::gpu::GpuMat &src, cv::gpu::GpuMat &dst, int *pX, int *pY) { float* p = (float*) src.data; float* p2 = (float*) dst.data; func(p, p2, src.step, dst.step, src.cols, src.rows, centroidRow, centroidCol); // *pX = centroidRow; // *pY = centroidCol; // cout << "print row=" << *pX << endl; // cout << "print col=" << *pY << endl; } int cudaFindCentroid(cv::Mat src, int *pX, int *pY, int *pArea) { //GPU Mat... Copy from CPU memory to GPU memory... cv::gpu::GpuMat gpu_src(src); cv::gpu::GpuMat matAfterBlur; //Filters on GPU... cv::gpu::blur(gpu_src, matAfterBlur, Size(cla.blur_radius, cla.blur_radius)); cv::gpu::GpuMat matAfterThreshold; //Convert into Binary image on GPU... cv::gpu::threshold(matAfterBlur, matAfterThreshold, int(cla.threshold_ratio * 255), 255, THRESH_BINARY_INV); cv::gpu::GpuMat floatMatForKernel; matAfterThreshold.convertTo(floatMatForKernel, CV_32FC1); // cout << "i'm here" << endl; callKernel(floatMatForKernel, gpu_src, pX, pY); // //Copy from GPU memory to CPU memory... // if (*pX) { //// *pX = bRect.x + (bRect.width / 2); //// *pY = bRect.y + (bRect.height / 2); // *pArea = 10; // } else { // *pX = -1; // *pY = -1; // *pArea = -1; // } // // return 0; } int wormSegmenter() { fstream outputFile; outputFile.open(cla.output.c_str(), ios::out); int x = -1, y = -1, area = -1; int adjustX = 0, adjustY = 0; for (int fileNumber = 0; fileNumber < cla.frames; fileNumber++) { string fileName = cla.input + intToFileName("0000000", fileNumber) + cla.extension; cv::Mat src = cv::imread(fileName, CV_LOAD_IMAGE_GRAYSCALE); if (!src.data) { // cout << endl << "Exited." 
<< endl; exit(1); } if ((x == -1) && (y == -1)) { findCentroidFrom1Image(src, &x, &y, &area); src = cv::imread(fileName, CV_LOAD_IMAGE_GRAYSCALE); adjustX = x - (cla.search_win_size / 2); adjustY = y - (cla.search_win_size / 2); } else { src = src( cv::Rect(x - (cla.search_win_size / 2), y - (cla.search_win_size / 2), cla.search_win_size, cla.search_win_size)); cudaFindCentroid(src, &x, &y, &area); if ((x > 0) && (y > 0)) { //std::cout << "writing file=" << fileNumber << "x=" << x << "y=" << y << endl; // x += adjustX; // y += adjustY; // adjustX = x - (cla.search_win_size / 2); // adjustY = y - (cla.search_win_size / 2); x = 153; y = 251; } } // cout << "writing file=" << fileNumber << "x=" << x << "y=" << y << endl; outputFile << fileNumber << ", " << x << ", " << y << ", " << area << endl; } outputFile.close(); return 0; } int findCentroidFrom1Image(cv::Mat src, int *pX, int *pY, int *pArea) { // Smoothing the image. blur(src, src, Size(cla.blur_radius, cla.blur_radius)); //Blur radius 3 in original java worm segmenter. // Convert the image into binary image. threshold(src, src, int(cla.threshold_ratio * 255), 255, THRESH_BINARY_INV); // Vector for storing contour vector<vector<Point> > contours; vector<Vec4i> hierarchy; // Find contours in the image. findContours(src, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); if (contours.size() > 0) { int largest_contour_index = 0; int largest_area = 0; // Iterate through each contour. for (int i = 0; i < contours.size(); i++) { // Find the area of contour double a = contourArea(contours[i], false); if (a > largest_area) { largest_area = a; // Store the index of largest contour largest_contour_index = i; } } Rect bRect = boundingRect(contours[largest_contour_index]); *pX = bRect.x + (bRect.width / 2); *pY = bRect.y + (bRect.height / 2); *pArea = largest_area; } else { *pX = -1; *pY = -1; *pArea = -1; } return 0; } int main(int argc, char **argv) { int i = 0; // file, delimiter, first_line_is_header? 
CsvParser *csvparser = CsvParser_new("example_file.csv", ",", 0); CsvRow *row; std::vector<std::vector<float> > vec; while ((row = CsvParser_getRow(csvparser))) { std::vector<float> eachLine; //printf("==NEW LINE==\n"); const char **rowFields = CsvParser_getFields(row); for (i = 0; i < CsvParser_getNumFields(row); i++) { eachLine.push_back(atof(rowFields[i])); //printf("FIELD: %f\n", eachLine[i]); } vec.push_back(eachLine); // printf("\n"); CsvParser_destroy_row(row); } // printf("test=%f\n", vec[0][1]); // printf("test=%f\n", vec[1][0]); // printf("test=%f\n", vec[2][0]); // printf("test=%f\n", vec[3][0]); CsvParser_destroy(csvparser); int a = wormSegmenter(); return 0; } //#define arraySIZE 240 __device__ int edgesValues[100][100]; //__device__ int edgesValues[480][640]; __shared__ int counter; __constant__ const int maxContourPoints = 300; __global__ void funcKernel(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int* inputArray_d, int* outputArray_d) { int rowInd = blockIdx.y * blockDim.y + threadIdx.y; int colInd = blockIdx.x * blockDim.x + threadIdx.x; if (rowInd >= rows || colInd >= cols) return; const float* rowsrcptr = (const float *) (((char *) srcptr) + rowInd * srcstep); float val = rowsrcptr[colInd]; if ((rowInd > 2 && rowInd < (rows - 2)) && (colInd > 2 && colInd < (cols - 2))) { if (val == 255) { const float* rowsrcptrNxt = (const float *) (((char *) srcptr) + (rowInd + 1) * srcstep); const float* rowsrcptrPrev = (const float *) (((char *) srcptr) + (rowInd - 1) * srcstep); if (rowsrcptrPrev[colInd - 1] == 0 || rowsrcptrPrev[colInd] == 0 || rowsrcptrPrev[colInd + 1] == 0 || rowsrcptr[colInd - 1] == 0 || rowsrcptr[colInd - 1] == 0 || rowsrcptrNxt[colInd - 1] == 0 || rowsrcptrNxt[colInd] == 0 || rowsrcptrNxt[colInd + 1] == 0) { edgesValues[rowInd][colInd] = 1; } else { edgesValues[rowInd][colInd] = 0; } } } } __global__ void funcKernel2(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int* inputArray_d, int* outputArray_d, int *a, int *b, int *c) { int rowInd = blockIdx.y * blockDim.y + threadIdx.y; int colInd = blockIdx.x * blockDim.x + threadIdx.x; if (rowInd >= rows || colInd >= cols) return; counter = 0; int maxRow = 0; int minRow = rows; int minCol = cols; int maxCol = 0; for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { if (edgesValues[i][j] == 1 && counter < maxContourPoints) { if (i < minRow) { minRow = i; } if (i > maxRow) { maxRow = i; } if (j < minCol) { minCol = j; } if (j > maxCol) { maxCol = j; } counter++; } } } int centroidRow = (minRow + maxRow) / 2; int centroidCol = (minCol + maxCol) / 2; // printf("%d,%d", centroidRow, centroidCol); *a = centroidRow; *b = centroidCol; *c = *a + *b; } int divUp(int a, unsigned int b) { return (a + b - 1) / b; } void func(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows, int& centroidRow, int& centroidCol) { dim3 blDim(32, 8); dim3 grDim(divUp(cols, blDim.x), divUp(rows, blDim.y)); int inputArray_h[rows * cols]; int outputArray_h[rows * cols]; int* cRowNumber; int* cColNumber; for (int j = 0; j < rows * cols; j++) { inputArray_h[j] = 0; } int int_BYTES = sizeof(int); //allocate GPU memory cudaMalloc((void**) &cRowNumber, int_BYTES); cudaMalloc((void**) &cColNumber, int_BYTES); cudaMemcpy(cRowNumber, inputArray_h, int_BYTES, cudaMemcpyHostToDevice); funcKernel<<<grDim, blDim>>>(srcptr, dstptr, srcstep, dststep, cols, rows, cRowNumber, cColNumber); // cudaDeviceSynchronize(); int 
*dev_a, *dev_b, *dev_c; // device copies of a, b, c
    int size = sizeof(int); // we need space for an integer

    // allocate device copies of a, b, c
    cudaMalloc((void**) &dev_a, size);
    cudaMalloc((void**) &dev_b, size);
    cudaMalloc((void**) &dev_c, size);

    // copy inputs to device
    funcKernel2<<<1, 1>>>(srcptr, dstptr, srcstep, dststep, cols, rows,
            cRowNumber, cColNumber, dev_a, dev_b, dev_c);
}
99f1629a9e49cc45cf437e2ee6a9b2f50f03adea.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "checkerboard/cuda/refineLines.h"

__global__ void RefineLinesKernel(RefineLinesParams params, int *lines, int *linesPointCount,
        int *linePoints, int *discardedLinePoints, int *discardedLinePointCount)
{
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int threads = params.count;
    if (threadId >= threads)
        return;

    // point
    // int px, py, px2, py2;
    // px = lines[threadId * 2] % params.resolution.x;
    // py = lines[threadId * 2] / params.resolution.x;
    // px2 = lines[threadId * 2 + 1] % params.resolution.x;
    // py2 = lines[threadId * 2 + 1] / params.resolution.x;

    int point1 = lines[threadId * 2];
    int point2 = lines[threadId * 2 + 1];

    for (int i = 0; i < params.count; i++)
    {
        if (threadId == i || (linesPointCount[threadId] >= linesPointCount[i]))
            continue;

        int linePoint1 = lines[i * 2];
        int linePoint2 = lines[i * 2 + 1];

        bool pxFound = false;
        bool px2Found = false;
        for (int j = 0; j < linesPointCount[i]; j++)
        {
            if (point1 == linePoints[i * 30 + j])
                pxFound = true;
            if (point2 == linePoints[i * 30 + j])
                px2Found = true;
        }
    }

    int index = atomicAdd(discardedLinePointCount, 1);
    discardedLinePoints[index] = threadId;
}

void RefineLines(RefineLinesParams params, int *lines, int *linesPointCount, int *linePoints,
        int *discardedLinePoints, int *discardedLinePointCount)
{
    int threads = params.count;
    int blocks = ceil(threads / 128.0f);

    hipLaunchKernelGGL(( RefineLinesKernel), dim3(blocks), dim3(128.0f), 0, 0, params, lines, linesPointCount,
            linePoints, discardedLinePoints, discardedLinePointCount);
}
99f1629a9e49cc45cf437e2ee6a9b2f50f03adea.cu
#include "checkerboard/cuda/refineLines.h" __global__ void RefineLinesKernel(RefineLinesParams params, int *lines, int *linesPointCount, int *linePoints, int *discardedLinePoints, int *discardedLinePointCount) { int threadId = (blockIdx.x * blockDim.x) + threadIdx.x; int threads = params.count; if (threadId >= threads) return; // point // int px, py, px2, py2; // px = lines[threadId * 2] % params.resolution.x; // py = lines[threadId * 2] / params.resolution.x; // px2 = lines[threadId * 2 + 1] % params.resolution.x; // py2 = lines[threadId * 2 + 1] / params.resolution.x; int point1 = lines[threadId * 2]; int point2 = lines[threadId * 2 + 1]; for (int i = 0; i < params.count; i++) { if (threadId == i || (linesPointCount[threadId] >= linesPointCount[i])) continue; int linePoint1 = lines[i * 2]; int linePoint2 = lines[i * 2 + 1]; bool pxFound = false; bool px2Found = false; for (int j = 0; j < linesPointCount[i]; j++) { if (point1 == linePoints[i * 30 + j]) pxFound = true; if (point2 == linePoints[i * 30 + j]) px2Found = true; } } int index = atomicAdd(discardedLinePointCount, 1); discardedLinePoints[index] = threadId; } void RefineLines(RefineLinesParams params, int *lines, int *linesPointCount, int *linePoints, int *discardedLinePoints, int *discardedLinePointCount) { int threads = params.count; int blocks = ceil(threads / 128.0f); RefineLinesKernel<<<blocks, 128.0f>>>(params, lines, linesPointCount, linePoints, discardedLinePoints, discardedLinePointCount); }
e90dd8fa90295ff59ef320394c8edebdc92b61a8.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************* /* ECE 285: GPU Programmming 2019 Winter quarter /* Author and Instructer: Hou Wang /* Copyright 2019 /* University of California, San Diego /*************************************************************************/ #include "cnn.h" #include <algorithm> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string> #include <cmath> #include <ctime> #define CURSTATE_LEN 233 #define BLOCK_SIZE 16 namespace cnn { const std::string DELIMITER = "====================================================="; __global__ void matrixMultiplication(float *a, float *b, float *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } // A: m x n, B:m x 1 __global__ void matrixBias(float *d_A, float *d_B, int m, int n) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx < n && idy < m) { int index = idy * n + idx; d_A[index] += d_B[idy]; } } __global__ void reluActivation(float *d_AL, int m) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < m) { float val = d_AL[idx]; d_AL[idx] = val > 0 ? val : 0; } } __global__ void softmaxActivation(float *d_AL, int m) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float exp = expf(d_AL[idx]); d_AL[idx] = exp; float sum = 0; // compute sum int startIdx = idx % CATEGORIES; int end = startIdx + CATEGORIES; for (int i = startIdx; i < end; ++i) { sum += d_AL[i]; } float sm = exp / sum; d_AL[idx] = sm; } __global__ void mulTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[BLOCK_SIZE][BLOCK_SIZE + 1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y; if ((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } __global__ void setup_rand(hiprandState_t* state, int w, int h, int c) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.x * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if (idxX < w && idxY < h && idxZ < c) { int idx = idxZ * (w * h) + idxY * w + idxX; // TODO: in final run, remove CURSTATE_LEN limit int curS = idx; hiprand_init((unsigned long long)clock() + curS, curS, 0, &state[curS]); } } __global__ void random_init(hiprandState_t* state, float* W, int w, int h, int c, float range) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if (idxX < w && idxY < h && idxZ < c) { int idx = idxZ * (w * h) + idxY * w + idxX; hiprandState_t localState = state[idx]; float val = hiprand_uniform(&localState) * range; W[idx] = val; state[idx] = localState; } } // A = Y: m x n __global__ void calCrossEntropyLoss (float* d_out, float *d_A, float *d_Y, 
int m, int n) { int nidx = threadIdx.x + blockDim.x * blockIdx.x; if (nidx < n) { for (int midx = 0; midx < m; ++midx) { int curIdx = midx * n + nidx; if (d_Y[curIdx] != 0) { d_out[midx] += -logf(d_A[curIdx]); break; } } } } // A = Y: m x n __global__ void elementWiseMatrixDeduction(float* d_out, float* d_A, float *d_Y, int m, int n) { int mIdx = threadIdx.y + blockDim.y * blockIdx.y; int nIdx = threadIdx.x + blockDim.x * blockIdx.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; d_out[idx] = d_A[idx] - d_Y[idx]; } } __global__ void matrixSumToOneAxis (float *d_dB, float *d_dZ, int m, int n) { int mIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m) { float sum = 0; for (int i = 0; i < n; ++i) { int idx = mIdx * n + i; d_dB[mIdx] += d_dZ[idx]; } } } __global__ void reluGrad(float *d_dZ, int m, int n) { int mIdx = threadIdx.y + blockIdx.y * blockDim.y; int nIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; float val = d_dZ[idx]; d_dZ[idx] = (val > 0) ? val : 0; } } __global__ void elementWiseMatrixMultiplication(float *d_dW, float learningRate, int m, int n) { int mIdx = threadIdx.y + blockIdx.y * blockDim.y; int nIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; d_dW[idx] *= learningRate; } } __global__ void countCorrectPredict(int* d_count, float* d_A, float* d_Y, int m, int n) { int nIdx = threadIdx.x + blockDim.x * blockIdx.x; if (nIdx < n) { bool isCorrect = false; float max = FLT_MIN; for (int i = 0; i < m; ++i) { int idx = m * i + nIdx; float cur = d_A[idx]; float curLabel = d_Y[idx]; if (cur > max) { isCorrect = d_Y[idx] != 0; max = cur; } } if (isCorrect) d_count[nIdx] = 1; else d_count[nIdx] = 0; } } // ========================================================================== // Kernel function wrappers: void transpose(float* A_v, int height, int width) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y); float *d_A_v, *d_out; CHECK(hipMalloc((void**)&d_A_v, height * width * sizeof(float))); CHECK(hipMalloc((void**)&d_out, height * width * sizeof(float))); CHECK(hipMemcpy(d_A_v, A_v, height * width * sizeof(float), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( mulTranspose) , dim3(grid), dim3(block) , 0, 0, d_out, d_A_v, width, height); CHECK(hipMemcpy(A_v, d_out, height * width * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A_v); hipFree(d_out); } // A: m x n, B: n x k void matrixMul(float* out, float* A, float* B, int m, int n, int k) { float *d_out, *d_A, *d_B; CHECK(hipMalloc((void**)&d_out, m * k * sizeof(float))); CHECK(hipMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_B, n * k * sizeof(float))); CHECK(hipMemcpy(d_A, A, m * n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, B, n * k * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((k + block.x - 1) / block.x, (m + block.y - 1) / block.y); matrixMultiplication << <grid, block >> > (d_A, d_B, d_out, m, n, k); CHECK(hipMemcpy(out, d_out, m * k * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_out); hipFree(d_A); hipFree(d_B); } // A: m x n, B: m x 1 void matrixAddBias(float* A, float* B, int m, int n) { float* d_A, *d_B; CHECK(hipMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_B, m * sizeof(float))); CHECK(hipMemcpy(d_A, A, m * n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, B, m * sizeof(float), hipMemcpyHostToDevice)); dim3 
block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); hipLaunchKernelGGL(( matrixBias) , dim3(grid), dim3(block) , 0, 0, d_A, d_B, m, n); CHECK(hipMemcpy(A, d_A, m * n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A); hipFree(d_B); } // aply relu activation on all A units void relu(float* A, int len) { float* d_A; CHECK(hipMalloc((void**)&d_A, len * sizeof(float))); CHECK(hipMemcpy(d_A, A, len * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE * BLOCK_SIZE); dim3 grid((len + block.x - 1) / block.x); hipLaunchKernelGGL(( reluActivation) , dim3(grid), dim3(block) , 0, 0, d_A, len); CHECK(hipMemcpy(A, d_A, len * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A); } void softmax(float* A, int len) { float *d_A; CHECK(hipMalloc((void**)&d_A, len * sizeof(float))); CHECK(hipMemcpy(d_A, A, len * sizeof(float), hipMemcpyHostToDevice)); dim3 block(150); dim3 grid((len + block.x - 1) / block.x); hipLaunchKernelGGL(( softmaxActivation) , dim3(grid), dim3(block) , 0, 0, d_A, len); CHECK(hipMemcpy(A, d_A, len * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A); } // A = Y: m x n float crossEntropyLoss(float *A_final, float *Y_one_hot, int m, int n) { float *d_A, *d_Y, *d_out; float *out = (float *)malloc(m * sizeof(float)); CHECK(hipMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_out, m * sizeof(float))); CHECK(hipMemcpy(d_A, A_final, m *n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x); hipLaunchKernelGGL(( calCrossEntropyLoss) , dim3(grid), dim3(block) , 0, 0, d_out, d_A, d_Y, m, n); CHECK(hipMemcpy(out, d_out, m * sizeof(float), hipMemcpyDeviceToHost)); float cost = 0; for (int i = 0; i < m; ++i) { cost += out[i]; } cost /= m; hipFree(d_A); hipFree(d_Y); hipFree(d_out); return cost; } // A = Y: m x n float* elementWiseMinus(float *A_final, float *Y_one_hot, int m, int n) { float *d_A, *d_Y, *d_out; float *out = (float *)malloc(m * n * sizeof(float)); CHECK(hipMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_out, m * n * sizeof(float))); CHECK(hipMemcpy(d_A, A_final, m * n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); elementWiseMatrixDeduction << <grid, block >> > (d_out, d_A, d_Y, m, n); CHECK(hipMemcpy(out, d_out, m * n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_A); hipFree(d_Y); hipFree(d_out); return out; } // dB: m x 1, dZ: m x n void matrixSum(float* dB, float* dZ, int m, int n) { float *d_dB, *d_dZ; CHECK(hipMalloc((void**)&d_dZ, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_dB, m * sizeof(float))); CHECK(hipMemcpy(d_dZ, dZ, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((m + block.x - 1) / block.x); matrixSumToOneAxis << <grid, block >> > (d_dB, d_dZ, m, n); CHECK(hipMemcpy(dB, d_dB, m * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_dB); hipFree(d_dZ); } // dZ: m x n void reluGradFilter(float* dZ, int m, int n) { float *d_dZ; CHECK(hipMalloc((void**)&d_dZ, m * n * sizeof(float))); CHECK(hipMemcpy(d_dZ, dZ, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n 
+ block.x - 1)/ block.x, (m + block.y - 1) / block.y); hipLaunchKernelGGL(( reluGrad) , dim3(grid), dim3(block) , 0, 0, d_dZ, m, n); CHECK(hipMemcpy(dZ, d_dZ, m * n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_dZ); } void updateW(float* W, float *dW, int m, int n, float learningRate) { float *d_W, *d_dW, *d_out; CHECK(hipMalloc((void**)&d_W, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_dW, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_out, m * n * sizeof(float))); CHECK(hipMemcpy(d_W, W, m * n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_dW, dW, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); hipLaunchKernelGGL(( elementWiseMatrixMultiplication) , dim3(grid), dim3(block) , 0, 0, d_dW, (float)learningRate / BATCH_SIZE, m, n); hipLaunchKernelGGL(( elementWiseMatrixDeduction) , dim3(grid), dim3(block) , 0, 0, d_out, d_W, d_dW, m, n); CHECK(hipMemcpy(W, d_out, m * n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_W); hipFree(d_dW); hipFree(d_out); } void updateB(float* B, float *dB, int m, float learningRate) { float *d_B, *d_dB, *d_out; CHECK(hipMalloc((void**)&d_B, m * sizeof(float))); CHECK(hipMalloc((void**)&d_dB, m * sizeof(float))); CHECK(hipMalloc((void**)&d_out, m * sizeof(float))); CHECK(hipMemcpy(d_B, B, m * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_dB, dB, m * sizeof(float), hipMemcpyHostToDevice)); dim3 block(1, BLOCK_SIZE); dim3 grid(1, (m + block.y - 1) / block.y); elementWiseMatrixMultiplication << <grid, block >> > (d_dB, (float)learningRate / BATCH_SIZE, m, 1); elementWiseMatrixDeduction << <grid, block >> > (d_out, d_B, d_dB, m, 1); CHECK(hipMemcpy(B, d_out, m * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_B); hipFree(d_dB); hipFree(d_out); } int getCorrectCount(float* A_final, float* Y_one_hot, int m, int n) { float* d_A, *d_Y; int *count, *d_count; count = (int *)malloc(n * sizeof(int)); CHECK(hipMalloc((void**)&d_A, m * n *sizeof(float))); CHECK(hipMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(hipMalloc((void**)&d_count, n * sizeof(int))); CHECK(hipMemcpy(d_A, A_final, m * n * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), hipMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.y - 1) / block.y); hipLaunchKernelGGL(( countCorrectPredict) , dim3(grid), dim3(block) , 0, 0, d_count, d_A, d_Y, m, n); CHECK(hipMemcpy(count, d_count, n * sizeof(float), hipMemcpyDeviceToHost)); int sum = 0; for (int i = 0; i < n; ++i) { sum += count[i]; } free(count); hipFree(d_A); hipFree(d_Y); hipFree(d_count); return sum; } // =================================================================================== // Class implementations void CNN::addLayer(Layer* layer) { this->layers.push_back(layer); } void ReLU::init() { // conv filter dimension: curNeuron * prevNeuron int w = prevShape[0]; int h = curShape[0]; int n = w * h; float range = sqrtf((float)2 / n); dim3 block(32, 8); dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y); hiprandState_t *randState; // set up cuda random generator CHECK(hipMalloc((void**)&randState, n * sizeof(hiprandState_t))); //test_setup_rand << <grid, block >> > (randState, w, h, 1); setup_rand << <grid, block >> > (randState, w, h, 1); this->W = (float*)malloc(n * sizeof(float)); this->b = (float*)malloc(h * sizeof(float)); memset(b, 0, h * sizeof(float)); float* d_W; CHECK(hipMalloc((void**) &d_W, n * sizeof(float))); 
hipLaunchKernelGGL(( random_init) , dim3(grid), dim3(block) , 0, 0, randState, d_W, w, h, 1, range); CHECK(hipMemcpy(W, d_W, n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_W); hipFree(randState); } void SoftMax::init() { // conv filter dimension: curNeuron * prevNeuron int w = this->prevShape[0]; int h = this->curShape[0]; int n = w * h; float range = sqrtf((float)1 / n); dim3 block(32, 8); dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y); hiprandState_t* randState; // set up cuda random generator CHECK(hipMalloc((void**)&randState, n * sizeof(hiprandState_t))); setup_rand << <grid, block >> > (randState, w, h, 1); this->W = (float*)malloc(n * sizeof(float)); this->b = (float*)malloc(h * sizeof(float)); memset(b, 0, h * sizeof(float)); float* d_W; CHECK(hipMalloc((void**)&d_W, n * sizeof(float))); random_init << <grid, block >> > (randState, d_W, w, h, 1, range); CHECK(hipMemcpy(this->W, d_W, n * sizeof(float), hipMemcpyDeviceToHost)); hipFree(d_W); hipFree(randState); } void initMiniBatch(std::vector<int>& v, int size) { for (int i = 0; i < size; ++i) { v.push_back(i); } } void getCurrentBatch(std::vector<float*>& A, std::vector<int>& Y_batch, std::vector<int>& miniBatch, std::vector<float*>& X_train, std::vector<int>& Y_train, int mIdx, int numOfMiniBatches, int batch_size) { int startIdx = mIdx * batch_size; for (int i = 0; i < batch_size; ++i) { int curIdx = startIdx + i; int dataIdx = miniBatch[curIdx]; A.push_back(X_train[dataIdx]); Y_batch.push_back(Y_train[dataIdx]); } } // ========================================= // Helpers int predict(float* A, int Y) { return 0; } void vectorize(std::vector<float*>& A, float* A_v, int A_len, std::vector<int>& Y_batch, float* Y_one_hot, int Y_len) { for (int i = 0; i < A.size(); ++i) { for (int j = 0; j < A_len; ++j) { auto tmp = A[i][j]; A_v[i * A_len + j] = A[i][j];//array overflow, solved by resize img } } for (int i = 0; i < Y_batch.size(); ++i) { for (int j = 0; j < Y_len; ++j) { int idx = i * Y_len + j; Y_one_hot[idx] = j == Y_batch[i] ? 
1 : 0; } } } // return activation of last layer A_final float* forwardPropagation(float* X , std::vector<Layer*>& layers) { float* A = X; for (Layer* layer : layers) { // A: batch_size * v A = layer->forward(A); } return A; } void backwardPropagation(float* dZ, std::vector<Layer*>& layers) { // backward prop and update parameters float* dAL = dZ; for (auto layer = layers.rbegin(); layer != layers.rend(); ++layer) { dAL = (*layer)->backward(dAL); } } void CNN::train(std::vector<float*>& X_train, std::vector<int>& Y_train, int epochs, int batch_size) { int m = X_train.size(); for (int eIdx = 0; eIdx < epochs; ++eIdx) { // loop through epochs int miniBatchCost = 0; std::vector<int> miniBatch; initMiniBatch(miniBatch, m); std::random_shuffle(miniBatch.begin(), miniBatch.end()); int numOfMiniBatches = (int) X_train.size() / batch_size; float avgMiniBatchCost = 0; int correctPred = 0; std::clock_t start = std::clock(); double duration; for (int mIdx = 0; mIdx < numOfMiniBatches; ++mIdx) { // forward propagate m samples in current batch // compute avg cost std::vector<float*> A; std::vector<int> Y_batch; getCurrentBatch(A, Y_batch, miniBatch, X_train, Y_train, mIdx, numOfMiniBatches, batch_size); // Y_batch stores current Y_train // A stores current activations computed from forward prop for all m samples int inputSize = inputShape[0] * inputShape[1] * inputShape[2]; int memsize = inputSize * batch_size; /* std::cout << "A_v: " << memsize << " " << "TOTAL pix:" <<inputSize << " " << "A.size:" << A.size() << " " << "Batchsize:" << batch_size << std::endl; */ float* A_v = (float*) malloc(memsize * sizeof(float)); memsize = CATEGORIES * batch_size; float* Y_one_hot = (float*) malloc(memsize * sizeof(float)); vectorize(A, A_v, inputSize, Y_batch, Y_one_hot, CATEGORIES); // A_v now: batch_size * curN // transpose A_v to curN * batchSize transpose(A_v, batch_size, inputSize); float* A_final = forwardPropagation(A_v, this->layers); // final activations -> compute cost and grads // cross-entropy cost avgMiniBatchCost += computeCost(A_final, Y_one_hot); correctPred += correctPredict(A_final, Y_one_hot); // compute grad from loss functions float* dZ = computeLossGrad(A_final, Y_one_hot); // backward propgation backwardPropagation(dZ, this->layers); std::cout << "\r" << "Batch progress: " << (mIdx + 1) * BATCH_SIZE << "/" << X_train.size() << std::flush; } duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; avgMiniBatchCost /= numOfMiniBatches; std::cout << std::endl; std::cout << "Cost after " << eIdx << " epoch: " << avgMiniBatchCost << std::endl; std::cout << "Correct Predicts after " << eIdx << " epoch: " << correctPred << "/" << X_train.size() << std::endl; std::cout << "Epoch Training time is: " << duration << " sec" << std::endl; std::cout << DELIMITER << std::endl; } std::cout << "Training Complete" << std::endl; std::cout << DELIMITER << std::endl; } void CNN::evaluate(std::vector<float*>& X_test, std::vector<int>& Y_test) { int inputSize = inputShape[0] * inputShape[1] * inputShape[2]; int test_size = X_test.size(); int memsize = inputSize * test_size; float* A_v = (float*)malloc(memsize * sizeof(float)); memsize = CATEGORIES * test_size; float* Y_one_hot = (float*)malloc(memsize * sizeof(float)); vectorize(X_test, A_v, inputSize, Y_test, Y_one_hot, CATEGORIES); // A_v now: batch_size * curN // transpose A_v to curN * batchSize transpose(A_v, test_size, inputSize); float* A_final = forwardPropagation(A_v, this->layers); float cost = computeCost(A_final, Y_one_hot); int correctPred = 
correctPredict(A_final, Y_one_hot); std::cout << "Final cost on test: " << cost << std::endl; std::cout << "Predict accuracy on test: " << correctPred << "/" << X_test.size() << std::endl; } // Relu Layer float* ReLU::forward(float* A_prev) { // allocate memory for current layer activation int curN = curShape[0]; int prevN = prevShape[0]; float *AL = (float *)malloc(curN * BATCH_SIZE * sizeof(float)); if (this->A) { free(this->A); this->A = nullptr; } this->A = A_prev; // A_prev: prevN * batchSize // W: curN * prevN // AL: curN * batchSize matrixMul(AL, this->W, A_prev, curN, prevN, BATCH_SIZE); matrixAddBias(AL, this->b, curN, BATCH_SIZE); relu(AL, curN * BATCH_SIZE); return AL; } float* ReLU::backward(float* dZ) { float* dZ_prev, *dW, *dB, *Wtmp; int curN = curShape[0]; int prevN = prevShape[0]; dW = (float*)malloc(curN * prevN * sizeof(float)); dB = (float*)malloc(curN * sizeof(float)); dZ_prev = (float*)malloc(prevN * BATCH_SIZE * sizeof(float)); Wtmp = (float*)malloc(prevN * curN * sizeof(float)); memcpy(Wtmp, this->W, prevN *curN * sizeof(float)); transpose(this->A, prevShape[0], BATCH_SIZE); matrixMul(dW, dZ, this->A, curN, BATCH_SIZE, prevN); // dZ: curN x batch_size matrixSum(dB, dZ, curN, BATCH_SIZE); // compute dZ_prev transpose(Wtmp, curN, prevN); matrixMul(dZ_prev, Wtmp, dZ, prevN, curN, BATCH_SIZE); reluGradFilter(dZ_prev, prevN, BATCH_SIZE); updateW(this->W, dW, curN, prevN, this->learningRate); updateB(this->b, dB, curN, this->learningRate); free(dW); free(dB); free(Wtmp); return dZ_prev; } // SoftMax Layer float* SoftMax::forward(float* A_prev) { // allocate memory for current layer activation int curN = curShape[0]; int prevN = prevShape[0]; float *AL = (float *)malloc(curN * BATCH_SIZE * sizeof(float)); if (this->A) { free(this->A); this->A = nullptr; } this->A = A_prev; // A_prev: prevN * batchSize // W: curN * prevN // AL: curN * batchSize matrixMul(AL, this->W, A_prev, curN, prevN, BATCH_SIZE); matrixAddBias(AL, this->b, curN, BATCH_SIZE); softmax(AL, curN * BATCH_SIZE); return AL; } float* SoftMax::backward(float* dZ) { float* dZ_prev, *dW, *dB, *Wtmp; int curN = curShape[0]; int prevN = prevShape[0]; dW = (float*)malloc(curN * prevN * sizeof(float)); dB = (float*)malloc(curN * sizeof(float)); dZ_prev = (float*)malloc(prevN * BATCH_SIZE * sizeof(float)); Wtmp = (float*)malloc(prevN * curN * sizeof(float)); memcpy(Wtmp, this->W, prevN *curN * sizeof(float)); transpose(this->A, prevShape[0], BATCH_SIZE); matrixMul(dW, dZ, this->A, curN, BATCH_SIZE, prevN); // dZ: curN x batch_size matrixSum(dB, dZ, curN, BATCH_SIZE); // compute dZ_prev transpose(Wtmp, curN, prevN); matrixMul(dZ_prev, Wtmp, dZ, prevN, curN, BATCH_SIZE); reluGradFilter(dZ_prev, prevN, BATCH_SIZE); updateW(this->W, dW, curN, prevN, this->learningRate); updateB(this->b, dB, curN, this->learningRate); free(dW); free(dB); free(Wtmp); return dZ_prev; } float CNN::computeCost(float* A_final, float* Y_one_hot) { float cost = crossEntropyLoss(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return cost; } // compute grad from loss function // implement cross-entropy loss float* CNN::computeLossGrad(float* A_final, float* Y_one_hot) { float* dZ = elementWiseMinus(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return dZ; } int CNN::correctPredict(float* A_final, float* Y_one_hot) { int count = getCorrectCount(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return count; } }
e90dd8fa90295ff59ef320394c8edebdc92b61a8.cu
/************************************************************************* /* ECE 285: GPU Programmming 2019 Winter quarter /* Author and Instructer: Hou Wang /* Copyright 2019 /* University of California, San Diego /*************************************************************************/ #include "cnn.h" #include <algorithm> #include <iostream> #include <cuda_runtime.h> #include <cuda.h> #include <string> #include <cmath> #include <ctime> #define CURSTATE_LEN 233 #define BLOCK_SIZE 16 namespace cnn { const std::string DELIMITER = "====================================================="; __global__ void matrixMultiplication(float *a, float *b, float *c, int m, int n, int k) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; float sum = 0; if (col < k && row < m) { for (int i = 0; i < n; i++) { sum += a[row * n + i] * b[i * k + col]; } c[row * k + col] = sum; } } // A: m x n, B:m x 1 __global__ void matrixBias(float *d_A, float *d_B, int m, int n) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; if (idx < n && idy < m) { int index = idy * n + idx; d_A[index] += d_B[idy]; } } __global__ void reluActivation(float *d_AL, int m) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < m) { float val = d_AL[idx]; d_AL[idx] = val > 0 ? val : 0; } } __global__ void softmaxActivation(float *d_AL, int m) { int idx = threadIdx.x + blockIdx.x * blockDim.x; float exp = expf(d_AL[idx]); d_AL[idx] = exp; float sum = 0; // compute sum int startIdx = idx % CATEGORIES; int end = startIdx + CATEGORIES; for (int i = startIdx; i < end; ++i) { sum += d_AL[i]; } float sm = exp / sum; d_AL[idx] = sm; } __global__ void mulTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[BLOCK_SIZE][BLOCK_SIZE + 1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * BLOCK_SIZE + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * BLOCK_SIZE + threadIdx.y; if ((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } __global__ void setup_rand(curandState* state, int w, int h, int c) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.x * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if (idxX < w && idxY < h && idxZ < c) { int idx = idxZ * (w * h) + idxY * w + idxX; // TODO: in final run, remove CURSTATE_LEN limit int curS = idx; curand_init((unsigned long long)clock() + curS, curS, 0, &state[curS]); } } __global__ void random_init(curandState* state, float* W, int w, int h, int c, float range) { int idxX = blockIdx.x * blockDim.x + threadIdx.x; int idxY = blockIdx.y * blockDim.y + threadIdx.y; int idxZ = blockIdx.z * blockDim.z + threadIdx.z; if (idxX < w && idxY < h && idxZ < c) { int idx = idxZ * (w * h) + idxY * w + idxX; curandState localState = state[idx]; float val = curand_uniform(&localState) * range; W[idx] = val; state[idx] = localState; } } // A = Y: m x n __global__ void calCrossEntropyLoss (float* d_out, float *d_A, float *d_Y, int m, int n) { int nidx = threadIdx.x + blockDim.x * blockIdx.x; if (nidx < n) { for 
(int midx = 0; midx < m; ++midx) { int curIdx = midx * n + nidx; if (d_Y[curIdx] != 0) { d_out[midx] += -logf(d_A[curIdx]); break; } } } } // A = Y: m x n __global__ void elementWiseMatrixDeduction(float* d_out, float* d_A, float *d_Y, int m, int n) { int mIdx = threadIdx.y + blockDim.y * blockIdx.y; int nIdx = threadIdx.x + blockDim.x * blockIdx.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; d_out[idx] = d_A[idx] - d_Y[idx]; } } __global__ void matrixSumToOneAxis (float *d_dB, float *d_dZ, int m, int n) { int mIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m) { float sum = 0; for (int i = 0; i < n; ++i) { int idx = mIdx * n + i; d_dB[mIdx] += d_dZ[idx]; } } } __global__ void reluGrad(float *d_dZ, int m, int n) { int mIdx = threadIdx.y + blockIdx.y * blockDim.y; int nIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; float val = d_dZ[idx]; d_dZ[idx] = (val > 0) ? val : 0; } } __global__ void elementWiseMatrixMultiplication(float *d_dW, float learningRate, int m, int n) { int mIdx = threadIdx.y + blockIdx.y * blockDim.y; int nIdx = threadIdx.x + blockIdx.x * blockDim.x; if (mIdx < m && nIdx < n) { int idx = mIdx * n + nIdx; d_dW[idx] *= learningRate; } } __global__ void countCorrectPredict(int* d_count, float* d_A, float* d_Y, int m, int n) { int nIdx = threadIdx.x + blockDim.x * blockIdx.x; if (nIdx < n) { bool isCorrect = false; float max = FLT_MIN; for (int i = 0; i < m; ++i) { int idx = m * i + nIdx; float cur = d_A[idx]; float curLabel = d_Y[idx]; if (cur > max) { isCorrect = d_Y[idx] != 0; max = cur; } } if (isCorrect) d_count[nIdx] = 1; else d_count[nIdx] = 0; } } // ========================================================================== // Kernel function wrappers: void transpose(float* A_v, int height, int width) { dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y); float *d_A_v, *d_out; CHECK(cudaMalloc((void**)&d_A_v, height * width * sizeof(float))); CHECK(cudaMalloc((void**)&d_out, height * width * sizeof(float))); CHECK(cudaMemcpy(d_A_v, A_v, height * width * sizeof(float), cudaMemcpyHostToDevice)); mulTranspose <<<grid, block >>> (d_out, d_A_v, width, height); CHECK(cudaMemcpy(A_v, d_out, height * width * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A_v); cudaFree(d_out); } // A: m x n, B: n x k void matrixMul(float* out, float* A, float* B, int m, int n, int k) { float *d_out, *d_A, *d_B; CHECK(cudaMalloc((void**)&d_out, m * k * sizeof(float))); CHECK(cudaMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_B, n * k * sizeof(float))); CHECK(cudaMemcpy(d_A, A, m * n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, B, n * k * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((k + block.x - 1) / block.x, (m + block.y - 1) / block.y); matrixMultiplication << <grid, block >> > (d_A, d_B, d_out, m, n, k); CHECK(cudaMemcpy(out, d_out, m * k * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_out); cudaFree(d_A); cudaFree(d_B); } // A: m x n, B: m x 1 void matrixAddBias(float* A, float* B, int m, int n) { float* d_A, *d_B; CHECK(cudaMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_B, m * sizeof(float))); CHECK(cudaMemcpy(d_A, A, m * n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, B, m * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / 
block.y); matrixBias <<<grid, block >>> (d_A, d_B, m, n); CHECK(cudaMemcpy(A, d_A, m * n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A); cudaFree(d_B); } // aply relu activation on all A units void relu(float* A, int len) { float* d_A; CHECK(cudaMalloc((void**)&d_A, len * sizeof(float))); CHECK(cudaMemcpy(d_A, A, len * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE * BLOCK_SIZE); dim3 grid((len + block.x - 1) / block.x); reluActivation <<<grid, block >>> (d_A, len); CHECK(cudaMemcpy(A, d_A, len * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A); } void softmax(float* A, int len) { float *d_A; CHECK(cudaMalloc((void**)&d_A, len * sizeof(float))); CHECK(cudaMemcpy(d_A, A, len * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(150); dim3 grid((len + block.x - 1) / block.x); softmaxActivation <<<grid, block >>> (d_A, len); CHECK(cudaMemcpy(A, d_A, len * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A); } // A = Y: m x n float crossEntropyLoss(float *A_final, float *Y_one_hot, int m, int n) { float *d_A, *d_Y, *d_out; float *out = (float *)malloc(m * sizeof(float)); CHECK(cudaMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_out, m * sizeof(float))); CHECK(cudaMemcpy(d_A, A_final, m *n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x); calCrossEntropyLoss <<<grid, block >>> (d_out, d_A, d_Y, m, n); CHECK(cudaMemcpy(out, d_out, m * sizeof(float), cudaMemcpyDeviceToHost)); float cost = 0; for (int i = 0; i < m; ++i) { cost += out[i]; } cost /= m; cudaFree(d_A); cudaFree(d_Y); cudaFree(d_out); return cost; } // A = Y: m x n float* elementWiseMinus(float *A_final, float *Y_one_hot, int m, int n) { float *d_A, *d_Y, *d_out; float *out = (float *)malloc(m * n * sizeof(float)); CHECK(cudaMalloc((void**)&d_A, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_out, m * n * sizeof(float))); CHECK(cudaMemcpy(d_A, A_final, m * n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); elementWiseMatrixDeduction << <grid, block >> > (d_out, d_A, d_Y, m, n); CHECK(cudaMemcpy(out, d_out, m * n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_A); cudaFree(d_Y); cudaFree(d_out); return out; } // dB: m x 1, dZ: m x n void matrixSum(float* dB, float* dZ, int m, int n) { float *d_dB, *d_dZ; CHECK(cudaMalloc((void**)&d_dZ, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_dB, m * sizeof(float))); CHECK(cudaMemcpy(d_dZ, dZ, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((m + block.x - 1) / block.x); matrixSumToOneAxis << <grid, block >> > (d_dB, d_dZ, m, n); CHECK(cudaMemcpy(dB, d_dB, m * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_dB); cudaFree(d_dZ); } // dZ: m x n void reluGradFilter(float* dZ, int m, int n) { float *d_dZ; CHECK(cudaMalloc((void**)&d_dZ, m * n * sizeof(float))); CHECK(cudaMemcpy(d_dZ, dZ, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1)/ block.x, (m + block.y - 1) / block.y); reluGrad <<<grid, block >>> (d_dZ, m, n); CHECK(cudaMemcpy(dZ, d_dZ, m * n * sizeof(float), cudaMemcpyDeviceToHost)); 
cudaFree(d_dZ); } void updateW(float* W, float *dW, int m, int n, float learningRate) { float *d_W, *d_dW, *d_out; CHECK(cudaMalloc((void**)&d_W, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_dW, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_out, m * n * sizeof(float))); CHECK(cudaMemcpy(d_W, W, m * n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_dW, dW, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE, BLOCK_SIZE); dim3 grid((n + block.x - 1) / block.x, (m + block.y - 1) / block.y); elementWiseMatrixMultiplication <<<grid, block >>> (d_dW, (float)learningRate / BATCH_SIZE, m, n); elementWiseMatrixDeduction <<<grid, block >>> (d_out, d_W, d_dW, m, n); CHECK(cudaMemcpy(W, d_out, m * n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_W); cudaFree(d_dW); cudaFree(d_out); } void updateB(float* B, float *dB, int m, float learningRate) { float *d_B, *d_dB, *d_out; CHECK(cudaMalloc((void**)&d_B, m * sizeof(float))); CHECK(cudaMalloc((void**)&d_dB, m * sizeof(float))); CHECK(cudaMalloc((void**)&d_out, m * sizeof(float))); CHECK(cudaMemcpy(d_B, B, m * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_dB, dB, m * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(1, BLOCK_SIZE); dim3 grid(1, (m + block.y - 1) / block.y); elementWiseMatrixMultiplication << <grid, block >> > (d_dB, (float)learningRate / BATCH_SIZE, m, 1); elementWiseMatrixDeduction << <grid, block >> > (d_out, d_B, d_dB, m, 1); CHECK(cudaMemcpy(B, d_out, m * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_B); cudaFree(d_dB); cudaFree(d_out); } int getCorrectCount(float* A_final, float* Y_one_hot, int m, int n) { float* d_A, *d_Y; int *count, *d_count; count = (int *)malloc(n * sizeof(int)); CHECK(cudaMalloc((void**)&d_A, m * n *sizeof(float))); CHECK(cudaMalloc((void**)&d_Y, m * n * sizeof(float))); CHECK(cudaMalloc((void**)&d_count, n * sizeof(int))); CHECK(cudaMemcpy(d_A, A_final, m * n * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_Y, Y_one_hot, m * n * sizeof(float), cudaMemcpyHostToDevice)); dim3 block(BLOCK_SIZE); dim3 grid((n + block.y - 1) / block.y); countCorrectPredict <<<grid, block >>> (d_count, d_A, d_Y, m, n); CHECK(cudaMemcpy(count, d_count, n * sizeof(float), cudaMemcpyDeviceToHost)); int sum = 0; for (int i = 0; i < n; ++i) { sum += count[i]; } free(count); cudaFree(d_A); cudaFree(d_Y); cudaFree(d_count); return sum; } // =================================================================================== // Class implementations void CNN::addLayer(Layer* layer) { this->layers.push_back(layer); } void ReLU::init() { // conv filter dimension: curNeuron * prevNeuron int w = prevShape[0]; int h = curShape[0]; int n = w * h; float range = sqrtf((float)2 / n); dim3 block(32, 8); dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y); curandState *randState; // set up cuda random generator CHECK(cudaMalloc((void**)&randState, n * sizeof(curandState))); //test_setup_rand << <grid, block >> > (randState, w, h, 1); setup_rand << <grid, block >> > (randState, w, h, 1); this->W = (float*)malloc(n * sizeof(float)); this->b = (float*)malloc(h * sizeof(float)); memset(b, 0, h * sizeof(float)); float* d_W; CHECK(cudaMalloc((void**) &d_W, n * sizeof(float))); random_init <<< grid, block >>> (randState, d_W, w, h, 1, range); CHECK(cudaMemcpy(W, d_W, n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_W); cudaFree(randState); } void SoftMax::init() { // conv filter dimension: curNeuron * prevNeuron int w = this->prevShape[0]; int h = 
this->curShape[0]; int n = w * h; float range = sqrtf((float)1 / n); dim3 block(32, 8); dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y); curandState* randState; // set up cuda random generator CHECK(cudaMalloc((void**)&randState, n * sizeof(curandState))); setup_rand << <grid, block >> > (randState, w, h, 1); this->W = (float*)malloc(n * sizeof(float)); this->b = (float*)malloc(h * sizeof(float)); memset(b, 0, h * sizeof(float)); float* d_W; CHECK(cudaMalloc((void**)&d_W, n * sizeof(float))); random_init << <grid, block >> > (randState, d_W, w, h, 1, range); CHECK(cudaMemcpy(this->W, d_W, n * sizeof(float), cudaMemcpyDeviceToHost)); cudaFree(d_W); cudaFree(randState); } void initMiniBatch(std::vector<int>& v, int size) { for (int i = 0; i < size; ++i) { v.push_back(i); } } void getCurrentBatch(std::vector<float*>& A, std::vector<int>& Y_batch, std::vector<int>& miniBatch, std::vector<float*>& X_train, std::vector<int>& Y_train, int mIdx, int numOfMiniBatches, int batch_size) { int startIdx = mIdx * batch_size; for (int i = 0; i < batch_size; ++i) { int curIdx = startIdx + i; int dataIdx = miniBatch[curIdx]; A.push_back(X_train[dataIdx]); Y_batch.push_back(Y_train[dataIdx]); } } // ========================================= // Helpers int predict(float* A, int Y) { return 0; } void vectorize(std::vector<float*>& A, float* A_v, int A_len, std::vector<int>& Y_batch, float* Y_one_hot, int Y_len) { for (int i = 0; i < A.size(); ++i) { for (int j = 0; j < A_len; ++j) { auto tmp = A[i][j]; A_v[i * A_len + j] = A[i][j];//array overflow, solved by resize img } } for (int i = 0; i < Y_batch.size(); ++i) { for (int j = 0; j < Y_len; ++j) { int idx = i * Y_len + j; Y_one_hot[idx] = j == Y_batch[i] ? 1 : 0; } } } // return activation of last layer A_final float* forwardPropagation(float* X , std::vector<Layer*>& layers) { float* A = X; for (Layer* layer : layers) { // A: batch_size * v A = layer->forward(A); } return A; } void backwardPropagation(float* dZ, std::vector<Layer*>& layers) { // backward prop and update parameters float* dAL = dZ; for (auto layer = layers.rbegin(); layer != layers.rend(); ++layer) { dAL = (*layer)->backward(dAL); } } void CNN::train(std::vector<float*>& X_train, std::vector<int>& Y_train, int epochs, int batch_size) { int m = X_train.size(); for (int eIdx = 0; eIdx < epochs; ++eIdx) { // loop through epochs int miniBatchCost = 0; std::vector<int> miniBatch; initMiniBatch(miniBatch, m); std::random_shuffle(miniBatch.begin(), miniBatch.end()); int numOfMiniBatches = (int) X_train.size() / batch_size; float avgMiniBatchCost = 0; int correctPred = 0; std::clock_t start = std::clock(); double duration; for (int mIdx = 0; mIdx < numOfMiniBatches; ++mIdx) { // forward propagate m samples in current batch // compute avg cost std::vector<float*> A; std::vector<int> Y_batch; getCurrentBatch(A, Y_batch, miniBatch, X_train, Y_train, mIdx, numOfMiniBatches, batch_size); // Y_batch stores current Y_train // A stores current activations computed from forward prop for all m samples int inputSize = inputShape[0] * inputShape[1] * inputShape[2]; int memsize = inputSize * batch_size; /* std::cout << "A_v: " << memsize << " " << "TOTAL pix:" <<inputSize << " " << "A.size:" << A.size() << " " << "Batchsize:" << batch_size << std::endl; */ float* A_v = (float*) malloc(memsize * sizeof(float)); memsize = CATEGORIES * batch_size; float* Y_one_hot = (float*) malloc(memsize * sizeof(float)); vectorize(A, A_v, inputSize, Y_batch, Y_one_hot, CATEGORIES); // A_v now: batch_size * 
curN // transpose A_v to curN * batchSize transpose(A_v, batch_size, inputSize); float* A_final = forwardPropagation(A_v, this->layers); // final activations -> compute cost and grads // cross-entropy cost avgMiniBatchCost += computeCost(A_final, Y_one_hot); correctPred += correctPredict(A_final, Y_one_hot); // compute grad from loss functions float* dZ = computeLossGrad(A_final, Y_one_hot); // backward propgation backwardPropagation(dZ, this->layers); std::cout << "\r" << "Batch progress: " << (mIdx + 1) * BATCH_SIZE << "/" << X_train.size() << std::flush; } duration = (std::clock() - start) / (double)CLOCKS_PER_SEC; avgMiniBatchCost /= numOfMiniBatches; std::cout << std::endl; std::cout << "Cost after " << eIdx << " epoch: " << avgMiniBatchCost << std::endl; std::cout << "Correct Predicts after " << eIdx << " epoch: " << correctPred << "/" << X_train.size() << std::endl; std::cout << "Epoch Training time is: " << duration << " sec" << std::endl; std::cout << DELIMITER << std::endl; } std::cout << "Training Complete" << std::endl; std::cout << DELIMITER << std::endl; } void CNN::evaluate(std::vector<float*>& X_test, std::vector<int>& Y_test) { int inputSize = inputShape[0] * inputShape[1] * inputShape[2]; int test_size = X_test.size(); int memsize = inputSize * test_size; float* A_v = (float*)malloc(memsize * sizeof(float)); memsize = CATEGORIES * test_size; float* Y_one_hot = (float*)malloc(memsize * sizeof(float)); vectorize(X_test, A_v, inputSize, Y_test, Y_one_hot, CATEGORIES); // A_v now: batch_size * curN // transpose A_v to curN * batchSize transpose(A_v, test_size, inputSize); float* A_final = forwardPropagation(A_v, this->layers); float cost = computeCost(A_final, Y_one_hot); int correctPred = correctPredict(A_final, Y_one_hot); std::cout << "Final cost on test: " << cost << std::endl; std::cout << "Predict accuracy on test: " << correctPred << "/" << X_test.size() << std::endl; } // Relu Layer float* ReLU::forward(float* A_prev) { // allocate memory for current layer activation int curN = curShape[0]; int prevN = prevShape[0]; float *AL = (float *)malloc(curN * BATCH_SIZE * sizeof(float)); if (this->A) { free(this->A); this->A = nullptr; } this->A = A_prev; // A_prev: prevN * batchSize // W: curN * prevN // AL: curN * batchSize matrixMul(AL, this->W, A_prev, curN, prevN, BATCH_SIZE); matrixAddBias(AL, this->b, curN, BATCH_SIZE); relu(AL, curN * BATCH_SIZE); return AL; } float* ReLU::backward(float* dZ) { float* dZ_prev, *dW, *dB, *Wtmp; int curN = curShape[0]; int prevN = prevShape[0]; dW = (float*)malloc(curN * prevN * sizeof(float)); dB = (float*)malloc(curN * sizeof(float)); dZ_prev = (float*)malloc(prevN * BATCH_SIZE * sizeof(float)); Wtmp = (float*)malloc(prevN * curN * sizeof(float)); memcpy(Wtmp, this->W, prevN *curN * sizeof(float)); transpose(this->A, prevShape[0], BATCH_SIZE); matrixMul(dW, dZ, this->A, curN, BATCH_SIZE, prevN); // dZ: curN x batch_size matrixSum(dB, dZ, curN, BATCH_SIZE); // compute dZ_prev transpose(Wtmp, curN, prevN); matrixMul(dZ_prev, Wtmp, dZ, prevN, curN, BATCH_SIZE); reluGradFilter(dZ_prev, prevN, BATCH_SIZE); updateW(this->W, dW, curN, prevN, this->learningRate); updateB(this->b, dB, curN, this->learningRate); free(dW); free(dB); free(Wtmp); return dZ_prev; } // SoftMax Layer float* SoftMax::forward(float* A_prev) { // allocate memory for current layer activation int curN = curShape[0]; int prevN = prevShape[0]; float *AL = (float *)malloc(curN * BATCH_SIZE * sizeof(float)); if (this->A) { free(this->A); this->A = nullptr; } this->A = A_prev; 
// A_prev: prevN * batchSize // W: curN * prevN // AL: curN * batchSize matrixMul(AL, this->W, A_prev, curN, prevN, BATCH_SIZE); matrixAddBias(AL, this->b, curN, BATCH_SIZE); softmax(AL, curN * BATCH_SIZE); return AL; } float* SoftMax::backward(float* dZ) { float* dZ_prev, *dW, *dB, *Wtmp; int curN = curShape[0]; int prevN = prevShape[0]; dW = (float*)malloc(curN * prevN * sizeof(float)); dB = (float*)malloc(curN * sizeof(float)); dZ_prev = (float*)malloc(prevN * BATCH_SIZE * sizeof(float)); Wtmp = (float*)malloc(prevN * curN * sizeof(float)); memcpy(Wtmp, this->W, prevN *curN * sizeof(float)); transpose(this->A, prevShape[0], BATCH_SIZE); matrixMul(dW, dZ, this->A, curN, BATCH_SIZE, prevN); // dZ: curN x batch_size matrixSum(dB, dZ, curN, BATCH_SIZE); // compute dZ_prev transpose(Wtmp, curN, prevN); matrixMul(dZ_prev, Wtmp, dZ, prevN, curN, BATCH_SIZE); reluGradFilter(dZ_prev, prevN, BATCH_SIZE); updateW(this->W, dW, curN, prevN, this->learningRate); updateB(this->b, dB, curN, this->learningRate); free(dW); free(dB); free(Wtmp); return dZ_prev; } float CNN::computeCost(float* A_final, float* Y_one_hot) { float cost = crossEntropyLoss(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return cost; } // compute grad from loss function // implement cross-entropy loss float* CNN::computeLossGrad(float* A_final, float* Y_one_hot) { float* dZ = elementWiseMinus(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return dZ; } int CNN::correctPredict(float* A_final, float* Y_one_hot) { int count = getCorrectCount(A_final, Y_one_hot, CATEGORIES, BATCH_SIZE); return count; } }
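// Note on the loss gradient used by the network above: since the output layer is a
// softmax trained with cross-entropy, the gradient of the loss with respect to the
// pre-softmax activations collapses to (A_final - Y_one_hot), which is what
// CNN::computeLossGrad() returns through elementWiseMinus(). A minimal standalone
// illustration for one sample with three classes (values chosen only for illustration):
//
//   float A[3] = {0.7f, 0.2f, 0.1f};          // softmax output
//   float Y[3] = {1.0f, 0.0f, 0.0f};          // one-hot label
//   float dZ[3];
//   for (int k = 0; k < 3; ++k) dZ[k] = A[k] - Y[k];   // -> {-0.3, 0.2, 0.1}
//
// The per-layer backward passes then only need dW = dZ * A_prev^T, dB reduced over the
// batch (matrixSum), and dZ_prev = W^T * dZ, which matches the matrixMul/matrixSum calls
// in ReLU::backward and SoftMax::backward above.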
bce4d3fb3336085ec9fc8fed26b4cfed3fde641d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include "bcsr.hpp" #include "utils.hpp" __device__ void swap(int* arr, int i, int j) { int temp = arr[i]; arr[i] = arr[j]; arr[j] = temp; return; } __global__ void bitonic_sort(float* mat, int N, int* idx) { __shared__ int nnz[8]; int tid = blockIdx.x * blockDim.x + threadIdx.x; float* start = mat + tid * N; // cal nnz nnz[tid] = 0; for ( int i = 0 ; i < N ; i ++ ) { if ( start[i] != 0 ) nnz[tid] += 1; } __syncthreads(); for (unsigned int k = 2 ; k <= 8 ; k *= 2) { for (unsigned int j = k / 2; j > 0; j /= 2){ unsigned int ixj = tid ^ j; // determine which element to compare, every j changes if ixj is tid + j or tid - j if (ixj > tid){ if ((tid & k) == 0) { // determine arrow direction, every k changes if tid & k is = 1 or = 0 if (nnz[tid] > nnz[ixj]) { // swap both nnz and tid swap(nnz, tid, ixj); swap(idx, tid, ixj); } } else { if (nnz[tid] < nnz[ixj]) { swap(nnz, tid, ixj); swap(idx, tid, ixj); } } } __syncthreads(); } } } int main() { float arr[64] = { 0, 1, 1, 1, 1, 0, 1, 0, // 5 1, 1, 1, 1, 1, 0, 1, 0, // 6 0, 1, 1, 1, 0, 0, 1, 0, // 4 0, 1, 0, 0, 1, 0, 1, 0, // 3 0, 1, 1, 1, 1, 1, 1, 1, // 7 0, 0, 0, 0, 0, 0, 0, 0, // 0 0, 1, 0, 0, 0, 0, 1, 0, // 2 0, 0, 0, 0, 0, 0, 1, 0, // 1 }; float arr1[64] = { 0, 1, 1, 1, 1, 1, 1, 1, // 7 1, 1, 1, 1, 1, 0, 1, 0, // 6 0, 1, 1, 1, 1, 0, 1, 0, // 5 0, 1, 1, 1, 0, 0, 1, 0, // 4 0, 1, 0, 0, 1, 0, 1, 0, // 3 0, 1, 0, 0, 0, 0, 1, 0, // 2 0, 0, 0, 0, 0, 0, 1, 0, // 1 0, 0, 0, 0, 0, 0, 0, 0, // 0 }; // 5 7 6 3 2 0 1 4 int res[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; float* d_arr; int* d_res; bcsr bcsr_mat{8, 8, 2, 2}; cal_block(&bcsr_mat, arr); printf("current nnz block num: %d\n", bcsr_mat.nnz_block_num); bcsr bcsr_mat1{8, 8, 2, 2}; cal_block(&bcsr_mat1, arr1); printf("current nnz block num: %d\n", bcsr_mat1.nnz_block_num); hipMalloc(&d_arr, sizeof(float) * 64); hipMalloc(&d_res, sizeof(int) * 8); hipMemcpy(d_arr, arr, sizeof(float) * 64, hipMemcpyHostToDevice); hipMemcpy(d_res, res, sizeof(int) * 8, hipMemcpyHostToDevice); hipLaunchKernelGGL(( bitonic_sort), dim3(1), dim3(8), 0, 0, d_arr, 8, d_res); hipDeviceSynchronize(); hipMemcpy(res, d_res, sizeof(int) * 8, hipMemcpyDeviceToHost); for ( int i = 0 ; i < 8 ; i ++ ) { printf("%d ", res[i]); } printf("\n"); }
bce4d3fb3336085ec9fc8fed26b4cfed3fde641d.cu
#include <stdio.h> #include <cuda_runtime.h> #include "bcsr.hpp" #include "utils.hpp" __device__ void swap(int* arr, int i, int j) { int temp = arr[i]; arr[i] = arr[j]; arr[j] = temp; return; } __global__ void bitonic_sort(float* mat, int N, int* idx) { __shared__ int nnz[8]; int tid = blockIdx.x * blockDim.x + threadIdx.x; float* start = mat + tid * N; // cal nnz nnz[tid] = 0; for ( int i = 0 ; i < N ; i ++ ) { if ( start[i] != 0 ) nnz[tid] += 1; } __syncthreads(); for (unsigned int k = 2 ; k <= 8 ; k *= 2) { for (unsigned int j = k / 2; j > 0; j /= 2){ unsigned int ixj = tid ^ j; // determine which element to compare, every j changes if ixj is tid + j or tid - j if (ixj > tid){ if ((tid & k) == 0) { // determine arrow direction, every k changes if tid & k is = 1 or = 0 if (nnz[tid] > nnz[ixj]) { // swap both nnz and tid swap(nnz, tid, ixj); swap(idx, tid, ixj); } } else { if (nnz[tid] < nnz[ixj]) { swap(nnz, tid, ixj); swap(idx, tid, ixj); } } } __syncthreads(); } } } int main() { float arr[64] = { 0, 1, 1, 1, 1, 0, 1, 0, // 5 1, 1, 1, 1, 1, 0, 1, 0, // 6 0, 1, 1, 1, 0, 0, 1, 0, // 4 0, 1, 0, 0, 1, 0, 1, 0, // 3 0, 1, 1, 1, 1, 1, 1, 1, // 7 0, 0, 0, 0, 0, 0, 0, 0, // 0 0, 1, 0, 0, 0, 0, 1, 0, // 2 0, 0, 0, 0, 0, 0, 1, 0, // 1 }; float arr1[64] = { 0, 1, 1, 1, 1, 1, 1, 1, // 7 1, 1, 1, 1, 1, 0, 1, 0, // 6 0, 1, 1, 1, 1, 0, 1, 0, // 5 0, 1, 1, 1, 0, 0, 1, 0, // 4 0, 1, 0, 0, 1, 0, 1, 0, // 3 0, 1, 0, 0, 0, 0, 1, 0, // 2 0, 0, 0, 0, 0, 0, 1, 0, // 1 0, 0, 0, 0, 0, 0, 0, 0, // 0 }; // 5 7 6 3 2 0 1 4 int res[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; float* d_arr; int* d_res; bcsr bcsr_mat{8, 8, 2, 2}; cal_block(&bcsr_mat, arr); printf("current nnz block num: %d\n", bcsr_mat.nnz_block_num); bcsr bcsr_mat1{8, 8, 2, 2}; cal_block(&bcsr_mat1, arr1); printf("current nnz block num: %d\n", bcsr_mat1.nnz_block_num); cudaMalloc(&d_arr, sizeof(float) * 64); cudaMalloc(&d_res, sizeof(int) * 8); cudaMemcpy(d_arr, arr, sizeof(float) * 64, cudaMemcpyHostToDevice); cudaMemcpy(d_res, res, sizeof(int) * 8, cudaMemcpyHostToDevice); bitonic_sort<<<1, 8>>>(d_arr, 8, d_res); cudaDeviceSynchronize(); cudaMemcpy(res, d_res, sizeof(int) * 8, cudaMemcpyDeviceToHost); for ( int i = 0 ; i < 8 ; i ++ ) { printf("%d ", res[i]); } printf("\n"); }
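// Notes on the bitonic sort above (identical in the .hip and .cu variants): each of the
// 8 threads owns one row of the 8x8 matrix, counts that row's nonzeros into the shared
// nnz[] array, and the bitonic exchange network then sorts the row indices in idx[] by
// ascending nonzero count. tid ^ j selects the exchange partner (tid + j or tid - j), and
// (tid & k) selects the direction of each compare-and-swap: in the first pass
// (k = 2, j = 1) the pairs are (0,1) (2,3) (4,5) (6,7), ordered ascending when the lower
// index has (tid & k) == 0 and descending otherwise, which builds the bitonic sequences
// merged by the later passes. For arr[] the per-row counts are {5,6,4,3,7,0,2,1}, so the
// index order printed by main() should be 5 7 6 3 2 0 1 4 (rows sorted from 0 nonzeros up
// to 7), matching the "// 5 7 6 3 2 0 1 4" comment in the source.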
bcc5c89ab1942c5977d7c75893f6124f12bd3872.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. #define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w3a1(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. // printf("ckpt0\n"); int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. 
const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<N_GLOBAL; j++) { // printf("D[%d][%d]: %d\n", i, j, D[i*N_GLOBAL+j]); // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/64) * 21; const unsigned int block_tile_j = block_pos % (N_GLOBAL/64) * 64; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_GLOBAL) { break; } wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int *lane_ptr; int *shmem_ptr; if (warpId < 4) { lane_ptr = (int*)W + (threadIdx.x/42)*W_bit_offset*4 + block_tile_i*K_GLOBAL/32 + (threadIdx.x%42)/2*K_GLOBAL/32 + (threadIdx.x%42)%2 + tile_k*4; // lane_ptr = (int*)W + (threadIdx.x/42)*W_bit_offset*4; shmem_ptr = (int*)shmem + (threadIdx.x/2)*4*(CHUNK_K+SKEW) + threadIdx.x%2; if (threadIdx.x < 126) *shmem_ptr = *lane_ptr; } else { lane_ptr = (int*)&X[block_tile_j * ROW_BIT + (warpId-4)*16*ROW_BIT] + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4; shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4; *shmem_ptr = *lane_ptr; } if (warpId < 4) { lane_ptr += 2; shmem_ptr += 2; if (threadIdx.x < 126) *shmem_ptr = *lane_ptr; } else { lane_ptr += 8*ROW_BIT*4; shmem_ptr += 8*4*(CHUNK_K+SKEW); *shmem_ptr = *lane_ptr; } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<63; i+=21) { // printf("Load from GL. i: %d, val: %d %d %d %d \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // } // } // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=62; i<64; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // } // } #pragma unroll for(int i=0; i<5; i++) { // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. // int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO. 
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x+i*4*64; int val = *(shmem_warp_stream_ptr) + 2*(*(shmem_warp_stream_ptr+21*64)) + 4*(*(shmem_warp_stream_ptr+42*64)); *(D + block_tile_i*N_GLOBAL + block_tile_j + i*4*N_GLOBAL + threadIdx.x/64*N_GLOBAL + threadIdx.x%64) = val; } if(threadIdx.x < 64) { int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x+20*64; int val = *(shmem_warp_stream_ptr) + 2*(*(shmem_warp_stream_ptr+21*64)) + 4*(*(shmem_warp_stream_ptr+42*64)); *(D + block_tile_i*N_GLOBAL + block_tile_j + 20*N_GLOBAL + threadIdx.x/64*N_GLOBAL + threadIdx.x%64) = val; } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i*M_GLOBAL + j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } C_ref_before_decompose[m*K_GLOBAL+n]= tmp; } } for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n]; tmp = (tmp - 128); // Can be modified for other quantized parameters. for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | ((mask<<b) & tmp); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b]; } } } } void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); if (dst > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); if (dst > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? 
"Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int X_BIT = 1; int W_BIT = 3; int M_GLOBAL_no_pad = 64; int M_GLOBAL; if (M_GLOBAL_no_pad%21 != 0) { M_GLOBAL = (M_GLOBAL_no_pad/21+1)*21; } else { M_GLOBAL = M_GLOBAL_no_pad; } for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) { // int N_GLOBAL = 64; // int N_GLOBAL = M_GLOBAL_no_pad; int K_GLOBAL = N_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, hipMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( apmm_w3a1, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1000; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (apmm_w3a1), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V85, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL_no_pad, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL_no_pad) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, hipMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); /* Copmpute reference matrix on CPU */ compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); /* validation results */ validate_results(Output_h, Output_ref, M_GLOBAL_no_pad, N_GLOBAL); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(hipFree(reinterpret_cast<void *>(W))); checkCudaErrors(hipFree(reinterpret_cast<void *>(X))); checkCudaErrors(hipFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
bcc5c89ab1942c5977d7c75893f6124f12bd3872.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> // GPU configuration. #define WARP_SIZE 32 // MMA matrix tile dimensions. #define M 8 #define N 8 #define K 128 #define C_LAYOUT wmma::mem_row_major // Implementation constants. #define WARPS_PER_BLOCK 8 #define THREADS_PER_BLOCK (WARP_SIZE * WARPS_PER_BLOCK) #define CHUNK_K 1 #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 4 #define WARP_COL_TILES 2 #define BLOCK_ROW_TILES (WARP_ROW_TILES * BLOCK_ROW_WARPS) #define BLOCK_COL_TILES (WARP_COL_TILES * BLOCK_COL_WARPS) #define GLOBAL_MEM_STRIDE N_GLOBAL #define SHMEM_STRIDE (N * BLOCK_ROW_TILES) #define SHMEM_OFFSET (N * WARP_ROW_TILES) // The macro below is used to shift rows of the A matrix and columns of the B // matrix in shared memory to minimize possible bank conflicts. Before // performing the nvcuda::wmma::mma_sync operation, the warp must load the // matrix data using the nvcuda::wmma::load_matrix_sync operation. Although the // memory access pattern is not specified for that function, each lane in the // warp can read one or multiple matrix elements from different matrix rows or // columns. For shared memory, such access can result in bank conflicts if // different rows / columns of the matrix map to the same bank. By shifting each // row and column by a few bytes, we make sure that they map to different banks, // thus reducing the number of possible bank conflicts. The number of 32 // one-byte "uint8_t" elements is chosen as the minimum possible shift because // we must keep each row and column 256-bit aligned, as required by // nvcuda::wmma::load_matrix_sync. #define SKEW 0 // Updated for int4 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; __global__ void apmm_w3a1(const int4 *W, const int4 *X, int *D, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int wb, int xb) { // GEMM configuration. // printf("ckpt0\n"); int K_TILES = K_GLOBAL / 128; int W_bit_offset = M_GLOBAL*K_GLOBAL/128; int X_bit_offset = N_GLOBAL*K_GLOBAL/128; int ROW_BIT = K_GLOBAL/128; extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. // Warp and lane identification. 
const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("W[%d][%d]: %x\n", i, j, *((int*)W+i*K_GLOBAL/32+j)); // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int b=0; b<xb; b++) { // for(int i=0; i<N_GLOBAL; i++) { // for(int j=0; j<K_GLOBAL/32; j++) { // printf("bit: %d, X[%d][%d]: %x\n", b, i, j, *((int*)X+b*X_bit_offset + i*K_GLOBAL/32+j)); // } // } // } // } // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<M_GLOBAL; i++) { // for(int j=0; j<N_GLOBAL; j++) { // printf("D[%d][%d]: %d\n", i, j, D[i*N_GLOBAL+j]); // } // } // } for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_tile_i = block_pos / (N_GLOBAL/64) * 21; const unsigned int block_tile_j = block_pos % (N_GLOBAL/64) * 64; // Stop when there are no more D matrix tiles to compute in this CTA. if (block_tile_i >= M_GLOBAL) { break; } wmma::fragment<wmma::accumulator, M, N, K, int> c[WARP_COL_TILES] [WARP_ROW_TILES]; for(int i=0; i < WARP_COL_TILES; i++) for(int j = 0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // Go through the global K dimension by a fixed step at a time. #pragma unroll for (int tile_k = 0; tile_k < K_TILES; tile_k += CHUNK_K) { // Offset in shared memory from which the B matrix is stored. const size_t shmem_idx_b_off = 64; // TODO: This BLOCK_COL_TILES may be selected to improve performance. Maybe moved outside the for loop. // First half of the warp copies the first row / column of the matrix, // the second half of the warp copies the next. int *lane_ptr; int *shmem_ptr; if (warpId < 4) { lane_ptr = (int*)W + (threadIdx.x/42)*W_bit_offset*4 + block_tile_i*K_GLOBAL/32 + (threadIdx.x%42)/2*K_GLOBAL/32 + (threadIdx.x%42)%2 + tile_k*4; // lane_ptr = (int*)W + (threadIdx.x/42)*W_bit_offset*4; shmem_ptr = (int*)shmem + (threadIdx.x/2)*4*(CHUNK_K+SKEW) + threadIdx.x%2; if (threadIdx.x < 126) *shmem_ptr = *lane_ptr; } else { lane_ptr = (int*)&X[block_tile_j * ROW_BIT + (warpId-4)*16*ROW_BIT] + laneId/4*ROW_BIT*4 + laneId%4 + tile_k*4; shmem_ptr = (int*)shmem + warpId*16*4*(CHUNK_K+SKEW) + (laneId/4)*4*(CHUNK_K+SKEW) + laneId%4; *shmem_ptr = *lane_ptr; } if (warpId < 4) { lane_ptr += 2; shmem_ptr += 2; if (threadIdx.x < 126) *shmem_ptr = *lane_ptr; } else { lane_ptr += 8*ROW_BIT*4; shmem_ptr += 8*4*(CHUNK_K+SKEW); *shmem_ptr = *lane_ptr; } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=0; i<63; i+=21) { // printf("Load from GL. i: %d, val: %d %d %d %d \n", i, *((int*)&shmem[i][0]+0), *((int*)&shmem[i][0]+1), *((int*)&shmem[i][0]+2), *((int*)&shmem[i][0]+3)); // } // } // Compute a grid of C matrix tiles in each warp. 
#pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 2 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int t=0; t<a[i].num_elements; t++) { // printf("a[%d].x[%d]: %x\n", i, t, a[i].x[t]); // } // printf("shmem_idx_a: %d, k_step: %d\n", shmem_idx_a, k_step); // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = shmem_idx_b_off + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); } // This pointer is used to access the C and D matrix tiles this warp computes. int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 64 * 16 + (warpId % 2) * 32; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i * 64 * 8 + j * 8; wmma::store_matrix_sync(tile_ptr, c[i][j], 64, C_LAYOUT); } } __syncthreads(); // if (warpId == 0 && laneId == 0 && blockIdx.x==0) { // for(int i=62; i<64; i++) { // for(int j=0; j<64; j++) { // printf("i: %d, j: %d, val: %d\n", i, j, *((int*)&shmem[0][0]+i*64+j)); // } // } // } #pragma unroll for(int i=0; i<5; i++) { // This pointer is used to stream the C and D matrices block-wide tile to and from shared memory. // int *shmem_warp_stream_ptr = (int*)&shmem[0][0] + warpId * SHMEM_STRIDE * M; // Will be used only when writing back D. Maybe moved outside the for loop. TODO. 
int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x+i*4*64; int val = *(shmem_warp_stream_ptr) + 2*(*(shmem_warp_stream_ptr+21*64)) + 4*(*(shmem_warp_stream_ptr+42*64)); *(D + block_tile_i*N_GLOBAL + block_tile_j + i*4*N_GLOBAL + threadIdx.x/64*N_GLOBAL + threadIdx.x%64) = val; } if(threadIdx.x < 64) { int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x+20*64; int val = *(shmem_warp_stream_ptr) + 2*(*(shmem_warp_stream_ptr+21*64)) + 4*(*(shmem_warp_stream_ptr+42*64)); *(D + block_tile_i*N_GLOBAL + block_tile_j + 20*N_GLOBAL + threadIdx.x/64*N_GLOBAL + threadIdx.x%64) = val; } __syncthreads(); } } void init_matrices(int4 *W, int4 *X, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT){ int *W_int = (int*) W; int *X_int = (int*) X; for(int b=0; b<W_BIT; b++) { for(int i = 0; i < M_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i; W_int[b*M_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } for(int b = 0; b<X_BIT; b++) { for(int i = 0; i < N_GLOBAL; i++) { for(int j = 0; j < K_GLOBAL/32; j++) { // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = 0xFFFFFFFF; // X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = i*M_GLOBAL + j; X_int[b*N_GLOBAL*K_GLOBAL/32 + i*K_GLOBAL/32+j] = rand(); } } } } int popcnt(int i) { // Java: use int, and use >>> instead of >> // C or C++: use int i = i - ((i >> 1) & 0x55555555); i = (i & 0x33333333) + ((i >> 2) & 0x33333333); return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int W_BIT, int X_BIT) { int *W_int = (int*) W; int *X_int = (int*) X; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } ref_C[m*N_GLOBAL+n]= tmp; } } } void compute_ref_pack(int4 *W, int4 *X, int *ref_C, int M_GLOBAL, int N_GLOBAL, int K_GLOBAL, int X_BIT, int W_BIT, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. 
int *W_int = (int*) W; int *X_int = (int*) X; int C_ref_before_decompose[M_GLOBAL*N_GLOBAL]; for (int m = 0; m < M_GLOBAL; m++) { for (int n = 0; n < N_GLOBAL; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int k_tile=0; k_tile<K_GLOBAL/32; k_tile++) { int w_int = W_int[wb*M_GLOBAL*K_GLOBAL/32 + m*K_GLOBAL/32 + k_tile]; int x_int = X_int[xb*N_GLOBAL*K_GLOBAL/32 + n*K_GLOBAL/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } } } } C_ref_before_decompose[m*K_GLOBAL+n]= tmp; } } for(int m=0; m<M_GLOBAL; m++) { for(int n_tile=0; n_tile<N_GLOBAL/32; n_tile++) { int val[OUT_BIT]; for(int b=0; b<OUT_BIT; b++) val[b] = 0; for(int n=0; n<32; n++) { int tmp = C_ref_before_decompose[m*K_GLOBAL+n_tile*32+n]; tmp = (tmp - 128); // Can be modified for other quantized parameters. for(int b=0; b<OUT_BIT; b++) { int mask = 1; val[b] = val[b] << 1; val[b] = val[b] | ((mask<<b) & tmp); } } for(int b=0; b<OUT_BIT; b++) { ref_C[b*M_GLOBAL*N_GLOBAL/32+m*N_GLOBAL/32+n_tile/32] = val[b]; } } } } void validate_results(int *C, int* ref_C, int M_, int N_) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < M_; i++) { for(int j = 0; j < N_; j++) { int idx = i*N_+j; double dst = fabs(C[idx] - ref_C[idx]); if (dst > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, C: %d, ref_C: %d\n", i, j, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); } void validate_results_pack(int *C, int* ref_C, int M_, int N_, int OUT_BIT) { // Assume K_GLOBAL and N_GLOBAL is a multiplier of 32. printf("Checking computed result with pack for correctness: "); bool correct = true; double eps = 1.e-6; // machine zero for(int m = 0; m < M_; m++) { for(int n_tile = 0; n_tile < N_/32; n_tile++) { for(int b=0; b<OUT_BIT; b++) { int idx = b*M_*N_/32 + m*N_/32+n_tile; double dst = fabs(C[idx] - ref_C[idx]); if (dst > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("m: %d, n_tile: %d, b: %d, C: %d, ref_C: %d\n", m, n_tile, b, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? 
"Result = PASS" : "Result = FAIL"); } #define verify_output int main(int argc, char **argv) { int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int X_BIT = 1; int W_BIT = 3; int M_GLOBAL_no_pad = 64; int M_GLOBAL; if (M_GLOBAL_no_pad%21 != 0) { M_GLOBAL = (M_GLOBAL_no_pad/21+1)*21; } else { M_GLOBAL = M_GLOBAL_no_pad; } for (int N_GLOBAL=128; N_GLOBAL<=1024; N_GLOBAL += 128 ) { // int N_GLOBAL = 64; // int N_GLOBAL = M_GLOBAL_no_pad; int K_GLOBAL = N_GLOBAL; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * M_GLOBAL * (K_GLOBAL/128)* W_BIT)); checkCudaErrors( cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int) * M_GLOBAL * N_GLOBAL)); #ifdef verify_output int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; W_h = (int4 *)malloc(sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT); X_h = (int4 *)malloc(sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT); Output_h = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); printf("Preparing validation data for GPU...\n"); init_matrices(W_h, X_h, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * M_GLOBAL * (K_GLOBAL/128) * W_BIT, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * N_GLOBAL * (K_GLOBAL/128) * X_BIT, cudaMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( apmm_w3a1, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1000; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (apmm_w3a1<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(float)NUM_PROFILES; printf("V85, 64x64. M_GLOBAL: %d, N_GLOBAL: %d, K_GLOBAL: %d, X_BIT: %d, W_BIT: %d\n", M_GLOBAL_no_pad, N_GLOBAL, K_GLOBAL, X_BIT, W_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n", (((double)(M_GLOBAL_no_pad) * N_GLOBAL * K_GLOBAL * 2)/(bmma_ms_avg/1000.)) / 1e12); #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * M_GLOBAL * N_GLOBAL, cudaMemcpyDeviceToHost)); int *Output_ref = (int *)malloc(sizeof(int) * M_GLOBAL * N_GLOBAL); /* Copmpute reference matrix on CPU */ compute_ref(W_h, X_h, Output_ref, M_GLOBAL, N_GLOBAL, K_GLOBAL, W_BIT, X_BIT); /* validation results */ validate_results(Output_h, Output_ref, M_GLOBAL_no_pad, N_GLOBAL); free(W_h); free(X_h); free(Output_h); free(Output_ref); #endif checkCudaErrors(cudaFree(reinterpret_cast<void *>(W))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(X))); checkCudaErrors(cudaFree(reinterpret_cast<void *>(Output))); } return EXIT_SUCCESS; }
198074e99dc9b3f0cb5bdcaf39b34813b8f1049b.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "LSTMDeltaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *cellStateErrors = NULL; hipMalloc(&cellStateErrors, XSIZE*YSIZE); float *outputGateDeltas = NULL; hipMalloc(&outputGateDeltas, XSIZE*YSIZE); float *cellStates = NULL; hipMalloc(&cellStates, XSIZE*YSIZE); float *outputGateActivations = NULL; hipMalloc(&outputGateActivations, XSIZE*YSIZE); float *outputGateActivationDerivatives = NULL; hipMalloc(&outputGateActivationDerivatives, XSIZE*YSIZE); float *deltas = NULL; hipMalloc(&deltas, XSIZE*YSIZE); int cellCount = 1; int cellsPerBlock = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( LSTMDeltaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( LSTMDeltaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( LSTMDeltaKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
198074e99dc9b3f0cb5bdcaf39b34813b8f1049b.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "LSTMDeltaKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *cellStateErrors = NULL; cudaMalloc(&cellStateErrors, XSIZE*YSIZE); float *outputGateDeltas = NULL; cudaMalloc(&outputGateDeltas, XSIZE*YSIZE); float *cellStates = NULL; cudaMalloc(&cellStates, XSIZE*YSIZE); float *outputGateActivations = NULL; cudaMalloc(&outputGateActivations, XSIZE*YSIZE); float *outputGateActivationDerivatives = NULL; cudaMalloc(&outputGateActivationDerivatives, XSIZE*YSIZE); float *deltas = NULL; cudaMalloc(&deltas, XSIZE*YSIZE); int cellCount = 1; int cellsPerBlock = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); LSTMDeltaKernel<<<gridBlock,threadBlock>>>(cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { LSTMDeltaKernel<<<gridBlock,threadBlock>>>(cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { LSTMDeltaKernel<<<gridBlock,threadBlock>>>(cellStateErrors,outputGateDeltas,cellStates,outputGateActivations,outputGateActivationDerivatives,deltas,cellCount,cellsPerBlock); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
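// Two small observations on the benchmark harness above (the .hip and .cu versions differ
// only in the runtime API). The grid is sized by padding each matrix dimension up to a
// multiple of the block dimension, e.g. XSIZE = 240 with BLOCKX = 32 is padded to 256 and
// yields a 256/32 = 8-wide grid; the usual ceil-divide idiom gives the same result without
// the while loops:
//
//   dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX, (YSIZE + BLOCKY - 1) / BLOCKY);
//
// Also, kernel launches are asynchronous and only the very first launch is followed by a
// device synchronize, so the steady_clock interval around the 1000-launch loop can stop
// before the queued kernels have finished; adding cudaDeviceSynchronize()
// (hipDeviceSynchronize() in the .hip variant) just before taking `end` would make the
// reported time reflect completed work rather than launch enqueueing.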
06289c92c0d01a547017340369d7a3331ed730bd.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ int is_a_match(char *attempt)
{
    char plain_password1[] = "BP9843";
    char plain_password2[] = "RP6870";
    char plain_password3[] = "AP6498";
    char plain_password4[] = "IP1354";

    char *m = attempt;
    char *s = attempt;
    char *c = attempt;
    char *d = attempt;
    char *p1 = plain_password1;
    char *p2 = plain_password2;
    char *p3 = plain_password3;
    char *p4 = plain_password4;

    while(*m == *p1) {
        if(*m == '\0') {
            printf("Password: %s\n",plain_password1);
            break;
        }
        m++;
        p1++;
    }
    while(*s == *p2) {
        if(*s == '\0') {
            printf("Password: %s\n",plain_password2);
            break;
        }
        s++;
        p2++;
    }
    while(*c == *p3) {
        if(*c == '\0') {
            printf("Password: %s\n",plain_password3);
            break;
        }
        c++;
        p3++;
    }
    while(*d == *p4) {
        if(*d == '\0') {
            printf("Password: %s\n",plain_password4);
            return 1;
        }
        d++;
        p4++;
    }
    return 0;
}

__global__ void kernel()
{
    char i1,i2,i3,i4; //variables
    char password[7];
    password[6] = '\0';

    //block id and thread id initialized
    int i = blockIdx.x+65;
    int j = threadIdx.x+65;
    char firstMatch = i;
    char secondMatch = j;

    password[0] = firstMatch;
    password[1] = secondMatch;

    for(i1='0'; i1<='9'; i1++){
        for(i2='0'; i2<='9'; i2++){
            for(i3='0'; i3<='9'; i3++){
                for(i4='0'; i4<='9'; i4++){
                    password[2] = i1;
                    password[3] = i2;
                    password[4] = i3;
                    password[5] = i4;
                    if(is_a_match(password)) {
                    }
                    else {
                        //printf("tried: %s\n", password);
                    }
                }
            }
        }
    }
}
06289c92c0d01a547017340369d7a3331ed730bd.cu
#include "includes.h" __device__ int is_a_match(char *attempt) { char plain_password1[] = "BP9843"; char plain_password2[] = "RP6870"; char plain_password3[] = "AP6498"; char plain_password4[] = "IP1354"; char *m = attempt; char *s = attempt; char *c = attempt; char *d = attempt; char *p1 = plain_password1; char *p2 = plain_password2; char *p3 = plain_password3; char *p4 = plain_password4; while(*m == *p1) { if(*m == '\0') { printf("Password: %s\n",plain_password1); break; } m++; p1++; }while(*s == *p2) { if(*s == '\0') { printf("Password: %s\n",plain_password2); break; } s++; p2++; } while(*c == *p3) { if(*c == '\0') { printf("Password: %s\n",plain_password3); break; } c++; p3++; } while(*d == *p4) { if(*d == '\0') { printf("Password: %s\n",plain_password4); return 1; } d++; p4++; } return 0; } __global__ void kernel() { char i1,i2,i3,i4; //variables char password[7]; password[6] = '\0'; //block id thread id initilized int i = blockIdx.x+65; int j = threadIdx.x+65; char firstMatch = i; char secondMatch = j; password[0] = firstMatch; password[1] = secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ for(i3='0'; i3<='9'; i3++){ for(i4='0'; i4<='9'; i4++){ password[2] = i1; password[3] = i2; password[4] = i3; password[5] = i4; if(is_a_match(password)) { } else { //printf("tried: %s\n", password); }} } } } }
80272afbe99a2d02704b4a712c4b7ad44d903b00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright RedPortal, mujjingun 2017 - 2018. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <mgcpp/kernels/bits/fill.cuh> #include <cmath> #define BLK 64 namespace mgcpp { __global__ void mgblas_Sfill_impl(float* arr, float value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Dfill_impl(double* arr, double value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ double shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Cfill_impl(hipComplex* arr, hipComplex value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ hipComplex shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Zfill_impl(hipDoubleComplex* arr, hipDoubleComplex value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ hipDoubleComplex shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Hfill_impl(__half* arr, float value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ __half shared[64]; if(id >= n) return; __half conv_val = __float2half(value); shared[threadIdx.x] = conv_val; __syncthreads(); arr[id] = shared[threadIdx.x]; } mgblas_error_t mgblas_Sfill(float* arr, float value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); hipLaunchKernelGGL(( mgblas_Sfill_impl), dim3(BLK), dim3(grid_size), 0, 0, arr, value, n); return success; } mgblas_error_t mgblas_Dfill(double* arr, double value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); hipLaunchKernelGGL(( mgblas_Dfill_impl), dim3(BLK), dim3(grid_size), 0, 0, arr, value, n); return success; } mgblas_error_t mgblas_Cfill(hipComplex* arr, hipComplex value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); hipLaunchKernelGGL(( mgblas_Cfill_impl), dim3(BLK), dim3(grid_size), 0, 0, arr, value, n); return success; } mgblas_error_t mgblas_Zfill(hipDoubleComplex* arr, hipDoubleComplex value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); hipLaunchKernelGGL(( mgblas_Zfill_impl), dim3(BLK), dim3(grid_size), 0, 0, arr, value, n); return success; } mgblas_error_t mgblas_Hfill(__half* arr, float value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); hipLaunchKernelGGL(( mgblas_Hfill_impl), dim3(BLK), dim3(grid_size), 0, 0, arr, value, n); return success; } }
80272afbe99a2d02704b4a712c4b7ad44d903b00.cu
// Copyright RedPortal, mujjingun 2017 - 2018. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <mgcpp/kernels/bits/fill.cuh> #include <cmath> #define BLK 64 namespace mgcpp { __global__ void mgblas_Sfill_impl(float* arr, float value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ float shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Dfill_impl(double* arr, double value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ double shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Cfill_impl(cuComplex* arr, cuComplex value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ cuComplex shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Zfill_impl(cuDoubleComplex* arr, cuDoubleComplex value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ cuDoubleComplex shared[64]; if(id >= n) return; shared[threadIdx.x] = value; __syncthreads(); arr[id] = shared[threadIdx.x]; } __global__ void mgblas_Hfill_impl(__half* arr, float value, size_t n) { int const id = blockIdx.x * blockDim.x + threadIdx.x; __shared__ __half shared[64]; if(id >= n) return; __half conv_val = __float2half(value); shared[threadIdx.x] = conv_val; __syncthreads(); arr[id] = shared[threadIdx.x]; } mgblas_error_t mgblas_Sfill(float* arr, float value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); mgblas_Sfill_impl<<<BLK, grid_size>>>(arr, value, n); return success; } mgblas_error_t mgblas_Dfill(double* arr, double value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); mgblas_Dfill_impl<<<BLK, grid_size>>>(arr, value, n); return success; } mgblas_error_t mgblas_Cfill(cuComplex* arr, cuComplex value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); mgblas_Cfill_impl<<<BLK, grid_size>>>(arr, value, n); return success; } mgblas_error_t mgblas_Zfill(cuDoubleComplex* arr, cuDoubleComplex value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); mgblas_Zfill_impl<<<BLK, grid_size>>>(arr, value, n); return success; } mgblas_error_t mgblas_Hfill(__half* arr, float value, size_t n) { int grid_size = static_cast<int>( ceil(static_cast<float>(n)/ BLK )); mgblas_Hfill_impl<<<BLK, grid_size>>>(arr, value, n); return success; } }
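// Launch-configuration note for the fill kernels above (applies equally to the .hip file):
// the launches are written as <<<BLK, grid_size>>>, i.e. a fixed 64-block grid with
// ceil(n / 64) threads per block. That only stays within limits while ceil(n / 64) <= 1024,
// and because the kernels declare 64-element shared buffers indexed by threadIdx.x,
// threadIdx.x can run past the buffer once n > 64 * 64. If the intent is one thread per
// element with BLK threads per block, a more conventional configuration would look like
// the following sketch (same arithmetic, arguments swapped):
//
//   int grid_size = static_cast<int>(std::ceil(static_cast<float>(n) / BLK));
//   mgblas_Sfill_impl<<<grid_size, BLK>>>(arr, value, n);  // shared[BLK] then matches threadIdx.x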
Simulator.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <GL/glew.h> #include <GL/freeglut.h> #include <hip/hip_runtime.h> #include <cuda_gl_interop.h> #include "Utilities.h" #include "SpringsBuilder.h" #include "bvh\BRTreeNode.h" #include "bvh\BVHAccel.h" #include "Cloth.h" #include "ObjLoader.h" #include "Verlet.h" #include "Simulator.h" using namespace std; extern GLenum GL_MODE; Simulator::Simulator() : _size_faces(0), _size_vertices(0), _num_faces(0), _num_vertices(0), _id_in(0), _id_out(1), _d_x_orignal(NULL), _d_dir_collision_force(NULL), _d_dir_face_normals(NULL), _d_adj_spring_st(NULL), _d_adj_spring_bd(NULL), _d_adj_face_to_vertices(NULL), _d_adj_vertex_to_faces(NULL), #ifdef DEBUG_COLLISION collided_vertex(NULL), #endif _d_vbo_resource(NULL), _d_vbo_vertices(NULL), _d_vbo_normals(NULL) { _d_x_cur[0] = NULL; _d_x_cur[1] = NULL; _d_x_lst[0] = NULL; _d_x_lst[1] = NULL; } Simulator::~Simulator() { ref_release(); } void Simulator::update_body(const Mesh &body) { if (body.vertices.empty()) { cout << "return" << endl; return; } Mesh ex_body = body; ex_body.extend(0.01f); _bvh_builder.build_bvh(_bvh_tree, ex_body); _ccd_builder.build_ccd(_ccd_tree, body); } // void Simulator::create_buffer() { size_t heap_size = 256 * 1024 * 1024; //set heap size, the default is 8M hipDeviceSetLimit(hipLimitMallocHeapSize, heap_size); //sim_clothGPU const size_t vertices_bytes = sizeof(glm::vec3) * _size_vertices; safe_cuda(hipMalloc((void**)&_d_x_orignal, vertices_bytes)); safe_cuda(hipMalloc((void**)&_d_x_cur[0], vertices_bytes)); safe_cuda(hipMalloc((void**)&_d_x_cur[1], vertices_bytes)); safe_cuda(hipMalloc((void**)&_d_x_lst[0], vertices_bytes)); safe_cuda(hipMalloc((void**)&_d_x_lst[1], vertices_bytes)); _d_x_cur_in = _d_x_cur[_id_in]; _d_x_lst_in = _d_x_lst[_id_in]; _d_x_cur_out = _d_x_cur[_id_out]; _d_x_lst_out = _d_x_lst[_id_out]; safe_cuda(hipMalloc((void**)&_d_dir_collision_force, sizeof(glm::vec3) * _size_vertices)); // safe_cuda(hipMalloc((void**)&_d_dir_face_normals, sizeof(glm::vec3) * _size_faces)); // safe_cuda(hipMalloc((void**)&_d_adj_vertex_to_faces, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES)); // safe_cuda(hipMalloc((void**)&_d_adj_face_to_vertices, sizeof(unsigned int) * _size_faces * 3)); safe_cuda(hipMalloc((void**)&_d_adj_spring_st, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_SPRING_STRUCT)); safe_cuda(hipMalloc((void**)&_d_adj_spring_bd, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_SPRING_STRUCT)); #ifdef DEBUG_COLLISION safe_cuda(hipMalloc((void**)&collided_vertex, sizeof(unsigned int) * _size_vertices)); #endif } void Simulator::ref_auto_clean() { hipFree(_d_x_orignal); hipFree(_d_x_cur[0]); hipFree(_d_x_cur[1]); hipFree(_d_x_lst[0]); hipFree(_d_x_lst[1]); hipFree(_d_dir_collision_force); hipFree(_d_dir_face_normals); hipFree(_d_adj_face_to_vertices); hipFree(_d_adj_vertex_to_faces); hipFree(_d_adj_spring_st); hipFree(_d_adj_spring_bd); #ifdef DEBUG_COLLISION hipFree(collided_vertex); #endif } void Simulator::update_cloth(const Cloth &cloth) { //register vbo safe_cuda(hipGraphicsGLRegisterBuffer( &_d_vbo_resource, cloth.vbo.array_buffer, hipGraphicsMapFlagsWriteDiscard)); _num_textures = cloth.texures.size(); _num_vertices = cloth.vertices.size(); _num_faces = cloth.faces.size(); if (_size_vertices < _num_vertices || _size_faces < _num_faces) { _size_vertices = _num_textures; _size_faces = _num_faces; ref_renew(); create_buffer(); } // 
NUM_PER_VERTEX_ADJ_FACESUINT_MAX std::vector<unsigned int> vertex_adjfaces(_num_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES); cloth.get_vertex_adjface_matrix(vertex_adjfaces, sim_parameter.NUM_PER_VERTEX_ADJ_FACES, UINT_MAX); // GPU const size_t vertices_bytes = sizeof(glm::vec3) * _num_vertices; Vec3s cloth_v3(_num_vertices); for (size_t idx = 0; idx < _num_vertices; ++idx) { glm::vec4 v = cloth.vertices[idx]; cloth_v3[idx] = glm::vec3(v.x, v.y, v.z); } safe_cuda(hipMemcpy(_d_x_orignal, &cloth_v3[0], vertices_bytes, hipMemcpyHostToDevice)); safe_cuda(hipMemcpy(_d_x_cur_in, &cloth_v3[0], vertices_bytes, hipMemcpyHostToDevice)); safe_cuda(hipMemcpy(_d_x_lst_in, &cloth_v3[0], vertices_bytes, hipMemcpyHostToDevice)); //normal + 3 + OPENGL const size_t vertices_index_bytes = sizeof(unsigned int) * _num_faces * 3; safe_cuda(hipMemcpy(_d_adj_face_to_vertices, &cloth.faces[0], vertices_index_bytes, hipMemcpyHostToDevice)); //initilize to 0 safe_cuda(hipMemset(_d_dir_collision_force, 0, sizeof(glm::vec3) * _num_vertices)); const size_t vertex_adjface_bytes = sizeof(unsigned int) * _num_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES; safe_cuda(hipMemcpy(_d_adj_vertex_to_faces, &vertex_adjfaces[0], vertex_adjface_bytes, hipMemcpyHostToDevice)); //GPU _springs_builder.build(cloth, _d_adj_spring_st, _d_adj_spring_bd); #ifdef DEBUG_COLLISION //debug // a safe_cuda(hipMalloc((void**)&collided_vertex, sizeof(int) * _num_vertices)); hipMemset(collided_vertex, 0, sizeof(int) * _num_vertices); cpu_collided_veretx.resize(_num_vertices); updated_vertex.resize(_num_vertices); faces1 = cloth.faces1; #endif } void Simulator::simulate() { unsigned int num_threads, num_blocks; computeGridSize(_num_vertices, 512, num_blocks, num_threads); verlet << < num_blocks, num_threads >> > ( _bvh_tree, _num_vertices, _d_x_cur_in, _d_x_lst_in, _d_x_cur_out, _d_x_lst_out, _d_x_orignal, _d_adj_spring_st, _d_adj_spring_bd, _d_dir_collision_force #ifdef DEBUG_COLLISION , collided_vertex #endif ); safe_cuda(hipDeviceSynchronize()); #ifdef DEBUG_COLLISION hipMemcpy(&cpu_collided_veretx[0], collided_vertex, sizeof(int)*numParticles, hipMemcpyDeviceToHost); hipMemcpy(&updated_vertex[0], _d_vbo_vertices, sizeof(glm::vec4)*numParticles, hipMemcpyDeviceToHost); cout << "*****collided veretx index************" << endl; for (int i = 0; i < cpu_collided_veretx.size(); i++) { if (cpu_collided_veretx[i] == 1) cout << i << " "; } cout << endl; #endif swap_buffer(); } void Simulator::ccd() { unsigned int num_threads, num_blocks; computeGridSize(_num_vertices, 512, num_blocks, num_threads); CCD << < num_blocks, num_threads >> > ( _ccd_tree, _num_vertices, _d_x_cur_in, _d_x_lst_in, _d_x_cur_out, _d_x_lst_out, _d_x_orignal, _d_dir_collision_force ); // stop the CPU until the kernel has been executed safe_cuda(hipDeviceSynchronize()); //debug //hipMemcpy(&cpu_collided_veretx[0],collided_vertex,sizeof(int)*numParticles, hipMemcpyDeviceToHost); //hipMemcpy(&updated_vertex[0], _d_vbo_vertices,sizeof(glm::vec4)*numParticles, hipMemcpyDeviceToHost); //cout << "*****collided veretx index************" << endl; //for (int i = 0; i < cpu_collided_veretx.size(); i++) //{ // if (cpu_collided_veretx[i] == 1) // cout << i << " "; //} //cout << endl; swap_buffer(); } void Simulator::visulize() { size_t num_bytes; safe_cuda(hipGraphicsMapResources(1, &_d_vbo_resource, 0)); safe_cuda(hipGraphicsResourceGetMappedPointer((void **)&_d_vbo_vertices, &num_bytes, _d_vbo_resource)); // normal _d_vbo_normals = (glm::vec3*)((float*)_d_vbo_vertices + 4 * 
_num_vertices + 2 * _num_textures); unsigned int num_threads, num_blocks; computeGridSize(_num_faces, 512, num_blocks, num_threads); // _num_faces get_face_normal << <num_blocks, num_threads >> > (_num_faces, _d_x_cur_in, _d_adj_face_to_vertices, _d_dir_face_normals); safe_cuda(hipDeviceSynchronize()); computeGridSize(_num_vertices, 512, num_blocks, num_threads); show_vbo << <num_blocks, num_threads >> > (_num_vertices, _d_vbo_vertices, _d_vbo_normals, _d_x_cur_in, _d_adj_vertex_to_faces, _d_dir_face_normals); safe_cuda(hipDeviceSynchronize()); safe_cuda(hipGraphicsUnmapResources(1, &_d_vbo_resource, 0)); } void Simulator::swap_buffer() { int tmp = _id_in; _id_in = _id_out; _id_out = tmp; _d_x_cur_in = _d_x_cur[_id_in]; _d_x_lst_in = _d_x_lst[_id_in]; _d_x_cur_out = _d_x_cur[_id_out]; _d_x_lst_out = _d_x_lst[_id_out]; } #ifdef DEBUG_COLLISION void Simulator::draw_collided_vertex() { //draw outline first for (int i = 0; i < _num_faces; i++) { glm::vec4 ver[3]; glm::vec3 normal[3]; for (int j = 0; j < 3; j++) { ver[j] = updated_vertex[faces1[i].vertex_index[j]]; } glPointSize(1.0); glBegin(GL_MODE); glColor3f(1.0, 1.0, 1.0); for (int j = 0; j < 3; j++) { glVertex3f(ver[j].x, ver[j].y, ver[j].z); } glEnd(); } for (int i = 0; i < cpu_collided_veretx.size(); i++) { glm::vec4 v = updated_vertex[i]; if (cpu_collided_veretx[i] == 1) { //draw it glPointSize(10.0); glBegin(GL_POINTS); glColor3f(1.0, 0, 0); glVertex3f(v.x, v.y, v.z); glEnd(); } } } #endif
Simulator.cu
#include <iostream> #include <GL/glew.h> #include <GL/freeglut.h> #include <cuda_runtime.h> #include <cuda_gl_interop.h> #include "Utilities.h" #include "SpringsBuilder.h" #include "bvh\BRTreeNode.h" #include "bvh\BVHAccel.h" #include "Cloth.h" #include "ObjLoader.h" #include "Verlet.h" #include "Simulator.h" using namespace std; extern GLenum GL_MODE; Simulator::Simulator() : _size_faces(0), _size_vertices(0), _num_faces(0), _num_vertices(0), _id_in(0), _id_out(1), _d_x_orignal(NULL), _d_dir_collision_force(NULL), _d_dir_face_normals(NULL), _d_adj_spring_st(NULL), _d_adj_spring_bd(NULL), _d_adj_face_to_vertices(NULL), _d_adj_vertex_to_faces(NULL), #ifdef DEBUG_COLLISION collided_vertex(NULL), #endif _d_vbo_resource(NULL), _d_vbo_vertices(NULL), _d_vbo_normals(NULL) { _d_x_cur[0] = NULL; _d_x_cur[1] = NULL; _d_x_lst[0] = NULL; _d_x_lst[1] = NULL; } Simulator::~Simulator() { ref_release(); } void Simulator::update_body(const Mesh &body) { if (body.vertices.empty()) { cout << "return" << endl; return; } Mesh ex_body = body; ex_body.extend(0.01f); _bvh_builder.build_bvh(_bvh_tree, ex_body); _ccd_builder.build_ccd(_ccd_tree, body); } // manage buffer space void Simulator::create_buffer() { size_t heap_size = 256 * 1024 * 1024; //set heap size, the default is 8M cudaDeviceSetLimit(cudaLimitMallocHeapSize, heap_size); //send the sim_cloth vertex coordinates to the GPU const size_t vertices_bytes = sizeof(glm::vec3) * _size_vertices; safe_cuda(cudaMalloc((void**)&_d_x_orignal, vertices_bytes)); safe_cuda(cudaMalloc((void**)&_d_x_cur[0], vertices_bytes)); safe_cuda(cudaMalloc((void**)&_d_x_cur[1], vertices_bytes)); safe_cuda(cudaMalloc((void**)&_d_x_lst[0], vertices_bytes)); safe_cuda(cudaMalloc((void**)&_d_x_lst[1], vertices_bytes)); _d_x_cur_in = _d_x_cur[_id_in]; _d_x_lst_in = _d_x_lst[_id_in]; _d_x_cur_out = _d_x_cur[_id_out]; _d_x_lst_out = _d_x_lst[_id_out]; safe_cuda(cudaMalloc((void**)&_d_dir_collision_force, sizeof(glm::vec3) * _size_vertices)); // face normals safe_cuda(cudaMalloc((void**)&_d_dir_face_normals, sizeof(glm::vec3) * _size_faces)); // indices of the faces adjacent to each vertex safe_cuda(cudaMalloc((void**)&_d_adj_vertex_to_faces, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES)); // vertex indices safe_cuda(cudaMalloc((void**)&_d_adj_face_to_vertices, sizeof(unsigned int) * _size_faces * 3)); safe_cuda(cudaMalloc((void**)&_d_adj_spring_st, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_SPRING_STRUCT)); safe_cuda(cudaMalloc((void**)&_d_adj_spring_bd, sizeof(unsigned int) * _size_vertices * sim_parameter.NUM_PER_VERTEX_SPRING_STRUCT)); #ifdef DEBUG_COLLISION safe_cuda(cudaMalloc((void**)&collided_vertex, sizeof(unsigned int) * _size_vertices)); #endif } void Simulator::ref_auto_clean() { cudaFree(_d_x_orignal); cudaFree(_d_x_cur[0]); cudaFree(_d_x_cur[1]); cudaFree(_d_x_lst[0]); cudaFree(_d_x_lst[1]); cudaFree(_d_dir_collision_force); cudaFree(_d_dir_face_normals); cudaFree(_d_adj_face_to_vertices); cudaFree(_d_adj_vertex_to_faces); cudaFree(_d_adj_spring_st); cudaFree(_d_adj_spring_bd); #ifdef DEBUG_COLLISION cudaFree(collided_vertex); #endif } void Simulator::update_cloth(const Cloth &cloth) { //register vbo safe_cuda(cudaGraphicsGLRegisterBuffer( &_d_vbo_resource, cloth.vbo.array_buffer, cudaGraphicsMapFlagsWriteDiscard)); _num_textures = cloth.texures.size(); _num_vertices = cloth.vertices.size(); _num_faces = cloth.faces.size(); if (_size_vertices < _num_vertices || _size_faces < _num_faces) { _size_vertices = _num_textures; _size_faces = _num_faces; ref_renew(); create_buffer(); }
// each vertex stores at most NUM_PER_VERTEX_ADJ_FACES adjacent faces; unused slots are marked with UINT_MAX std::vector<unsigned int> vertex_adjfaces(_num_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES); cloth.get_vertex_adjface_matrix(vertex_adjfaces, sim_parameter.NUM_PER_VERTEX_ADJ_FACES, UINT_MAX); // transfer the related data to the GPU const size_t vertices_bytes = sizeof(glm::vec3) * _num_vertices; Vec3s cloth_v3(_num_vertices); for (size_t idx = 0; idx < _num_vertices; ++idx) { glm::vec4 v = cloth.vertices[idx]; cloth_v3[idx] = glm::vec3(v.x, v.y, v.z); } safe_cuda(cudaMemcpy(_d_x_orignal, &cloth_v3[0], vertices_bytes, cudaMemcpyHostToDevice)); safe_cuda(cudaMemcpy(_d_x_cur_in, &cloth_v3[0], vertices_bytes, cudaMemcpyHostToDevice)); safe_cuda(cudaMemcpy(_d_x_lst_in, &cloth_v3[0], vertices_bytes, cudaMemcpyHostToDevice)); //data needed to compute normals: the faces adjacent to each vertex + the 3 vertex indices of each face + all vertex indices (although OPENGL already has these data) const size_t vertices_index_bytes = sizeof(unsigned int) * _num_faces * 3; safe_cuda(cudaMemcpy(_d_adj_face_to_vertices, &cloth.faces[0], vertices_index_bytes, cudaMemcpyHostToDevice)); //initialize to 0 safe_cuda(cudaMemset(_d_dir_collision_force, 0, sizeof(glm::vec3) * _num_vertices)); const size_t vertex_adjface_bytes = sizeof(unsigned int) * _num_vertices * sim_parameter.NUM_PER_VERTEX_ADJ_FACES; safe_cuda(cudaMemcpy(_d_adj_vertex_to_faces, &vertex_adjfaces[0], vertex_adjface_bytes, cudaMemcpyHostToDevice)); //spring information, i.e. the two-level neighborhood vertices, transferred to the GPU _springs_builder.build(cloth, _d_adj_spring_st, _d_adj_spring_bd); #ifdef DEBUG_COLLISION //debug // a safe_cuda(cudaMalloc((void**)&collided_vertex, sizeof(int) * _num_vertices)); cudaMemset(collided_vertex, 0, sizeof(int) * _num_vertices); cpu_collided_veretx.resize(_num_vertices); updated_vertex.resize(_num_vertices); faces1 = cloth.faces1; #endif } void Simulator::simulate() { unsigned int num_threads, num_blocks; computeGridSize(_num_vertices, 512, num_blocks, num_threads); verlet << < num_blocks, num_threads >> > ( _bvh_tree, _num_vertices, _d_x_cur_in, _d_x_lst_in, _d_x_cur_out, _d_x_lst_out, _d_x_orignal, _d_adj_spring_st, _d_adj_spring_bd, _d_dir_collision_force #ifdef DEBUG_COLLISION , collided_vertex #endif ); safe_cuda(cudaDeviceSynchronize()); #ifdef DEBUG_COLLISION cudaMemcpy(&cpu_collided_veretx[0], collided_vertex, sizeof(int)*numParticles, cudaMemcpyDeviceToHost); cudaMemcpy(&updated_vertex[0], _d_vbo_vertices, sizeof(glm::vec4)*numParticles, cudaMemcpyDeviceToHost); cout << "*****collided veretx index************" << endl; for (int i = 0; i < cpu_collided_veretx.size(); i++) { if (cpu_collided_veretx[i] == 1) cout << i << " "; } cout << endl; #endif swap_buffer(); } void Simulator::ccd() { unsigned int num_threads, num_blocks; computeGridSize(_num_vertices, 512, num_blocks, num_threads); CCD << < num_blocks, num_threads >> > ( _ccd_tree, _num_vertices, _d_x_cur_in, _d_x_lst_in, _d_x_cur_out, _d_x_lst_out, _d_x_orignal, _d_dir_collision_force ); // stop the CPU until the kernel has been executed safe_cuda(cudaDeviceSynchronize()); //debug //cudaMemcpy(&cpu_collided_veretx[0],collided_vertex,sizeof(int)*numParticles, cudaMemcpyDeviceToHost); //cudaMemcpy(&updated_vertex[0], _d_vbo_vertices,sizeof(glm::vec4)*numParticles, cudaMemcpyDeviceToHost); //cout << "*****collided veretx index************" << endl; //for (int i = 0; i < cpu_collided_veretx.size(); i++) //{ // if (cpu_collided_veretx[i] == 1) // cout << i << " "; //} //cout << endl; swap_buffer(); } void Simulator::visulize() { size_t num_bytes; safe_cuda(cudaGraphicsMapResources(1, &_d_vbo_resource, 0)); safe_cuda(cudaGraphicsResourceGetMappedPointer((void **)&_d_vbo_vertices,
&num_bytes, _d_vbo_resource)); // get the pointer to the normal section _d_vbo_normals = (glm::vec3*)((float*)_d_vbo_vertices + 4 * _num_vertices + 2 * _num_textures); unsigned int num_threads, num_blocks; computeGridSize(_num_faces, 512, num_blocks, num_threads); // _num_faces get_face_normal << <num_blocks, num_threads >> > (_num_faces, _d_x_cur_in, _d_adj_face_to_vertices, _d_dir_face_normals); safe_cuda(cudaDeviceSynchronize()); computeGridSize(_num_vertices, 512, num_blocks, num_threads); show_vbo << <num_blocks, num_threads >> > (_num_vertices, _d_vbo_vertices, _d_vbo_normals, _d_x_cur_in, _d_adj_vertex_to_faces, _d_dir_face_normals); safe_cuda(cudaDeviceSynchronize()); safe_cuda(cudaGraphicsUnmapResources(1, &_d_vbo_resource, 0)); } void Simulator::swap_buffer() { int tmp = _id_in; _id_in = _id_out; _id_out = tmp; _d_x_cur_in = _d_x_cur[_id_in]; _d_x_lst_in = _d_x_lst[_id_in]; _d_x_cur_out = _d_x_cur[_id_out]; _d_x_lst_out = _d_x_lst[_id_out]; } #ifdef DEBUG_COLLISION void Simulator::draw_collided_vertex() { //draw outline first for (int i = 0; i < _num_faces; i++) { glm::vec4 ver[3]; glm::vec3 normal[3]; for (int j = 0; j < 3; j++) { ver[j] = updated_vertex[faces1[i].vertex_index[j]]; } glPointSize(1.0); glBegin(GL_MODE); glColor3f(1.0, 1.0, 1.0); for (int j = 0; j < 3; j++) { glVertex3f(ver[j].x, ver[j].y, ver[j].z); } glEnd(); } for (int i = 0; i < cpu_collided_veretx.size(); i++) { glm::vec4 v = updated_vertex[i]; if (cpu_collided_veretx[i] == 1) { //draw it glPointSize(10.0); glBegin(GL_POINTS); glColor3f(1.0, 0, 0); glVertex3f(v.x, v.y, v.z); glEnd(); } } } #endif
031ef0c656139d1c80824ce4e07c7a58f7f2c7b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Ocelot 0.4.72 issue. Steve Worley Oct 27 2009 [email protected] Ocelot fails when running kernels using dynamic shared memory, in 32 bit only. 32 bit (ONLY!) Ubuntu 9.04. CUDA 2.3 Compile with: nvcc ocbug.cu -lOcelotExecutive -lOcelotTrace -lOcelotIr -lOcelotParser -lhydrazine -lcudart run, and you get the error output: a.out: ocelot/executive/implementation/CooperativeThreadArray.cpp:1093: ir::PTXU32 executive::CooperativeThreadArray::operandAsU32(int, const ir::PTXOperand&): Assertion `0 == "invalid address mode of operand"' failed. Likely it has to do with the dynamic shared memory. Static variables work fine. */ #include <cstdio> __global__ void kernel(int *source) { extern __shared__ int s[]; s[threadIdx.x]=source[threadIdx.x]; } int main() { int *src; int host[10000]={0}; hipSetDevice(0); hipMalloc((void**)&src, 10000*sizeof(int)); hipMemcpy(src, host, 10000*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel), dim3(128), dim3(128), 15000, 0, src); printf("TEST PASSED\n"); return 0; }
031ef0c656139d1c80824ce4e07c7a58f7f2c7b3.cu
/* Ocelot 0.4.72 issue. Steve Worley Oct 27 2009 [email protected] Ocelot fails when running kernels using dynamic shared memory, in 32 bit only. 32 bit (ONLY!) Ubuntu 9.04. CUDA 2.3 Compile with: nvcc ocbug.cu -lOcelotExecutive -lOcelotTrace -lOcelotIr -lOcelotParser -lhydrazine -lcudart run, and you get the error output: a.out: ocelot/executive/implementation/CooperativeThreadArray.cpp:1093: ir::PTXU32 executive::CooperativeThreadArray::operandAsU32(int, const ir::PTXOperand&): Assertion `0 == "invalid address mode of operand"' failed. Likely it has to do with the dynamic shared memory. Static variables work fine. */ #include <cstdio> __global__ void kernel(int *source) { extern __shared__ int s[]; s[threadIdx.x]=source[threadIdx.x]; } int main() { int *src; int host[10000]={0}; cudaSetDevice(0); cudaMalloc((void**)&src, 10000*sizeof(int)); cudaMemcpy(src, host, 10000*sizeof(int), cudaMemcpyHostToDevice); kernel<<<128, 128, 15000>>>(src); printf("TEST PASSED\n"); return 0; }
bbee07df98ec53f8429a02346d8f6c95c3cd128d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" __global__ void reduction_neighbored_pairs(int * arr, int l, int offset) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid>=offset) return; if(gid<offset) { arr[gid]+=arr[gid+offset]; arr[gid+offset]=0; } } int cpu_summer(int * arr, int l) { int s=0; for(int i=0;i<l;i++) { s+=arr[i]; } return s; } int main() { srand(time(0)); int shape=1<<27; int size=shape*sizeof(int); int block_size=128; dim3 block(block_size); dim3 grid(shape>>1/block.x); int * arr; arr=(int *)malloc(size); int * d_arr; hipMalloc((void**)&d_arr, size); for(int i=0; i< shape; i++) { arr[i]=(int)(rand() & 0x0f); } clock_t ct1,ct2,gt1,gt2,gtt1,gtt2; ct1=clock(); int cpu=cpu_summer(arr, shape); ct2=clock(); gtt1=clock(); hipMemcpy(d_arr, arr, size, hipMemcpyHostToDevice); gt1=clock(); for(int offset=shape>>1;offset!=0;offset=offset>>1) { grid.x=offset>block.x?offset/block.x:1; hipLaunchKernelGGL(( reduction_neighbored_pairs), dim3(grid), dim3(block), 0, 0, d_arr, shape, offset); hipDeviceSynchronize(); } gt2=clock(); hipMemcpy(arr, d_arr, sizeof(int), hipMemcpyDeviceToHost); gtt2=clock(); printf("CPU sum : %d \nGPU sum : %d\n",cpu, arr[0]); printf(cpu==arr[0]?"CPU and GPU values Match\n":"CPU and GPU values do not match\n"); printf("GPU memory transfer time : %lf sec\n",(double)((-gt2+gt1+gtt2-gtt1)/(double)CLOCKS_PER_SEC)); printf("GPU calculation time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC)); printf("CPU calculation time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC)); hipFree(d_arr); free(arr); hipDeviceReset(); return 0; }
bbee07df98ec53f8429a02346d8f6c95c3cd128d.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" __global__ void reduction_neighbored_pairs(int * arr, int l, int offset) { int gid = blockDim.x * blockIdx.x + threadIdx.x; if(gid>=offset) return; if(gid<offset) { arr[gid]+=arr[gid+offset]; arr[gid+offset]=0; } } int cpu_summer(int * arr, int l) { int s=0; for(int i=0;i<l;i++) { s+=arr[i]; } return s; } int main() { srand(time(0)); int shape=1<<27; int size=shape*sizeof(int); int block_size=128; dim3 block(block_size); dim3 grid(shape>>1/block.x); int * arr; arr=(int *)malloc(size); int * d_arr; cudaMalloc((void**)&d_arr, size); for(int i=0; i< shape; i++) { arr[i]=(int)(rand() & 0x0f); } clock_t ct1,ct2,gt1,gt2,gtt1,gtt2; ct1=clock(); int cpu=cpu_summer(arr, shape); ct2=clock(); gtt1=clock(); cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice); gt1=clock(); for(int offset=shape>>1;offset!=0;offset=offset>>1) { grid.x=offset>block.x?offset/block.x:1; reduction_neighbored_pairs<<<grid, block>>>(d_arr, shape, offset); cudaDeviceSynchronize(); } gt2=clock(); cudaMemcpy(arr, d_arr, sizeof(int), cudaMemcpyDeviceToHost); gtt2=clock(); printf("CPU sum : %d \nGPU sum : %d\n",cpu, arr[0]); printf(cpu==arr[0]?"CPU and GPU values Match\n":"CPU and GPU values do not match\n"); printf("GPU memory transfer time : %lf sec\n",(double)((-gt2+gt1+gtt2-gtt1)/(double)CLOCKS_PER_SEC)); printf("GPU calculation time : %lf sec\n",(double)((gt2-gt1)/(double)CLOCKS_PER_SEC)); printf("CPU calculation time : %lf sec\n",(double)((ct2-ct1)/(double)CLOCKS_PER_SEC)); cudaFree(d_arr); free(arr); cudaDeviceReset(); return 0; }
c734b7f7445f7baee654de15d7df7beda261ba75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Paulius Micikevicius ([email protected]) * Max Grossman ([email protected]) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> /* * Before tackling this TODO, please read the TODO lower in this file that talks * about modifications to dimx. * * Once you have tackled the below TODO, you will need to modify this macro so * that the zeroth element in each row starts at an even multiple of 128 bytes. * The POINT_OFFSET macro is used to compute the offset in the curr, next, vsq * arrays of an (x, y) coordinate. Please note that x and y may be negative when * passed to this macro. * * By default, it computes this by multiplying the offset in the y direction * ('radius' rows of padding + 'y' rows) by 'dimx', the size of each row. It * then adds an offset of 'radius' elements of padding + 'x' columns. However, * given x=0 for any y, the byte alignment is not guaranteed to be 128 bytes * even if rows are always a multiple of 128 bytes because of the radius offset. 
*/ #define POINT_OFFSET(x, y, dimx, radius) \ (((radius) + (y)) * (dimx) + ((radius) + (x))) #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 __constant__ TYPE const_c_coeff[NUM_COEFF]; __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, int nx, int ny, int dimx, int radius) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int this_offset = POINT_OFFSET(x, y, dimx, radius); TYPE div = const_c_coeff[0] * curr[this_offset]; for (int d = 1; d <= radius; d++) { const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius); const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius); const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius); const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius); div += const_c_coeff[d] * (curr[y_pos_offset] + curr[y_neg_offset] + curr[x_pos_offset] + curr[x_neg_offset]); } const TYPE temp = 2.0f * curr[this_offset] - next[this_offset]; next[this_offset] = temp + div * vsq[this_offset]; } int main( int argc, char *argv[] ) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; /* * TODO Change dimx below to ensure that each row of curr, next, vsq starts * at a 128-byte aligned boundary. * * The fundamental problem with the current code is that if the following is * not guaranteed to be an even multiple of 128 bytes: * * (conf.nx + 2 * conf.radius) * sizeof(TYPE) * * then all rows of the allocated 2D matrices other than the first are * likely to start on mis-aligned byte boundaries. * * Therefore, the key change to make is to modify dimx such that each row * starts at a 128-byte boundary (i.e. the size of each row is itself a * multiple of 128 bytes). 
*/ // compute the pitch for perfect coalescing size_t dimx = conf.nx + 2*conf.radius; size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if (conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr = (TYPE *)malloc(nbytes); TYPE *next = (TYPE *)malloc(nbytes); TYPE *vsq = (TYPE *)malloc(nbytes); if (curr == NULL || next == NULL || vsq == NULL) { fprintf(stderr, "Allocations failed\n"); return 1; } config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt); TYPE *d_curr, *d_next, *d_vsq; CHECK(hipMalloc((void **)&d_curr, nbytes)); CHECK(hipMalloc((void **)&d_next, nbytes)); CHECK(hipMalloc((void **)&d_vsq, nbytes)); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(hipMemcpy(d_curr, curr, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_next, next, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_vsq, vsq, nbytes, hipMemcpyHostToDevice)); CHECK(hipMemcpyToSymbol(const_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE))); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(hipMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), hipMemcpyHostToDevice)); } hipLaunchKernelGGL(( fwd_kernel), dim3(grid), dim3(block), 0, 0, d_next, d_curr, d_vsq, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(hipDeviceSynchronize()); CHECK(hipGetLastError()); double compute_s = seconds() - start; CHECK(hipMemcpy(curr, d_curr, nbytes, hipMemcpyDeviceToHost)); double total_s = seconds() - mem_start; float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } free(curr); free(next); free(vsq); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(hipFree(d_curr)); CHECK(hipFree(d_next)); CHECK(hipFree(d_vsq)); return 0; }
c734b7f7445f7baee654de15d7df7beda261ba75.cu
/* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Paulius Micikevicius ([email protected]) * Max Grossman ([email protected]) */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <sys/time.h> #include <unistd.h> /* * Before tackling this TODO, please read the TODO lower in this file that talks * about modifications to dimx. * * Once you have tackled the below TODO, you will need to modify this macro so * that the zeroth element in each row starts at an even multiple of 128 bytes. * The POINT_OFFSET macro is used to compute the offset in the curr, next, vsq * arrays of an (x, y) coordinate. Please note that x and y may be negative when * passed to this macro. * * By default, it computes this by multiplying the offset in the y direction * ('radius' rows of padding + 'y' rows) by 'dimx', the size of each row. It * then adds an offset of 'radius' elements of padding + 'x' columns. However, * given x=0 for any y, the byte alignment is not guaranteed to be 128 bytes * even if rows are always a multiple of 128 bytes because of the radius offset. 
*/ #define POINT_OFFSET(x, y, dimx, radius) \ (((radius) + (y)) * (dimx) + ((radius) + (x))) #include "common.h" #include "common2d.h" #define BDIMX 32 #define BDIMY 16 __constant__ TYPE const_c_coeff[NUM_COEFF]; __global__ void fwd_kernel(TYPE *next, TYPE *curr, TYPE *vsq, int nx, int ny, int dimx, int radius) { const int y = blockIdx.y * blockDim.y + threadIdx.y; const int x = blockIdx.x * blockDim.x + threadIdx.x; const int this_offset = POINT_OFFSET(x, y, dimx, radius); TYPE div = const_c_coeff[0] * curr[this_offset]; for (int d = 1; d <= radius; d++) { const int y_pos_offset = POINT_OFFSET(x, y + d, dimx, radius); const int y_neg_offset = POINT_OFFSET(x, y - d, dimx, radius); const int x_pos_offset = POINT_OFFSET(x + d, y, dimx, radius); const int x_neg_offset = POINT_OFFSET(x - d, y, dimx, radius); div += const_c_coeff[d] * (curr[y_pos_offset] + curr[y_neg_offset] + curr[x_pos_offset] + curr[x_neg_offset]); } const TYPE temp = 2.0f * curr[this_offset] - next[this_offset]; next[this_offset] = temp + div * vsq[this_offset]; } int main( int argc, char *argv[] ) { config conf; setup_config(&conf, argc, argv); init_progress(conf.progress_width, conf.nsteps, conf.progress_disabled); if (conf.nx % BDIMX != 0) { fprintf(stderr, "Invalid nx configuration, must be an even multiple of " "%d\n", BDIMX); return 1; } if (conf.ny % BDIMY != 0) { fprintf(stderr, "Invalid ny configuration, must be an even multiple of " "%d\n", BDIMY); return 1; } TYPE dx = 20.f; TYPE dt = 0.002f; /* * TODO Change dimx below to ensure that each row of curr, next, vsq starts * at a 128-byte aligned boundary. * * The fundamental problem with the current code is that if the following is * not guaranteed to be an even multiple of 128 bytes: * * (conf.nx + 2 * conf.radius) * sizeof(TYPE) * * then all rows of the allocated 2D matrices other than the first are * likely to start on mis-aligned byte boundaries. * * Therefore, the key change to make is to modify dimx such that each row * starts at a 128-byte boundary (i.e. the size of each row is itself a * multiple of 128 bytes). 
*/ // compute the pitch for perfect coalescing size_t dimx = conf.nx + 2*conf.radius; size_t dimy = conf.ny + 2*conf.radius; size_t nbytes = dimx * dimy * sizeof(TYPE); if (conf.verbose) { printf("x = %zu, y = %zu\n", dimx, dimy); printf("nsteps = %d\n", conf.nsteps); printf("radius = %d\n", conf.radius); } TYPE c_coeff[NUM_COEFF]; TYPE *curr = (TYPE *)malloc(nbytes); TYPE *next = (TYPE *)malloc(nbytes); TYPE *vsq = (TYPE *)malloc(nbytes); if (curr == NULL || next == NULL || vsq == NULL) { fprintf(stderr, "Allocations failed\n"); return 1; } config_sources(&conf.srcs, &conf.nsrcs, conf.nx, conf.ny, conf.nsteps); TYPE **srcs = sample_sources(conf.srcs, conf.nsrcs, conf.nsteps, dt); init_data(curr, next, vsq, c_coeff, dimx, dimy, dimx * sizeof(TYPE), dx, dt); TYPE *d_curr, *d_next, *d_vsq; CHECK(cudaMalloc((void **)&d_curr, nbytes)); CHECK(cudaMalloc((void **)&d_next, nbytes)); CHECK(cudaMalloc((void **)&d_vsq, nbytes)); dim3 block(BDIMX, BDIMY); dim3 grid(conf.nx / block.x, conf.ny / block.y); double mem_start = seconds(); CHECK(cudaMemcpy(d_curr, curr, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_next, next, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_vsq, vsq, nbytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpyToSymbol(const_c_coeff, c_coeff, NUM_COEFF * sizeof(TYPE))); double start = seconds(); for (int step = 0; step < conf.nsteps; step++) { for (int src = 0; src < conf.nsrcs; src++) { if (conf.srcs[src].t > step) continue; int src_offset = POINT_OFFSET(conf.srcs[src].x, conf.srcs[src].y, dimx, conf.radius); CHECK(cudaMemcpy(d_curr + src_offset, srcs[src] + step, sizeof(TYPE), cudaMemcpyHostToDevice)); } fwd_kernel<<<grid, block>>>(d_next, d_curr, d_vsq, conf.nx, conf.ny, dimx, conf.radius); TYPE *tmp = d_next; d_next = d_curr; d_curr = tmp; update_progress(step + 1); } CHECK(cudaDeviceSynchronize()); CHECK(cudaGetLastError()); double compute_s = seconds() - start; CHECK(cudaMemcpy(curr, d_curr, nbytes, cudaMemcpyDeviceToHost)); double total_s = seconds() - mem_start; float point_rate = (float)conf.nx * conf.ny / (compute_s / conf.nsteps); printf("iso_r4_2x: %8.10f s total, %8.10f s/step, %8.2f Mcells/s/step\n", total_s, compute_s / conf.nsteps, point_rate / 1000000.f); if (conf.save_text) { save_text(curr, dimx, dimy, conf.ny, conf.nx, "snap.text", conf.radius); } free(curr); free(next); free(vsq); for (int i = 0; i < conf.nsrcs; i++) { free(srcs[i]); } free(srcs); CHECK(cudaFree(d_curr)); CHECK(cudaFree(d_next)); CHECK(cudaFree(d_vsq)); return 0; }
c45d0a35a99fc8f31a749ad3eee37d055a0a14fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by JEONGHYUNLEE on 2020/09/28. // #include <stdio.h> #include <caffe/caffe.hpp> #include "./common.hpp" using namespace caffe; typedef float Dtype; // Kernel Function __global__ void find_kernel(const int n, const Dtype *target, Dtype *idx, const Dtype value ) { CUDA_KERNEL_LOOP(index, n) { if (target[index] == value) { idx[index] = 1.0; } else { idx[index] = 0.0; } } } // Wrapper void caffe_gpu_find(const int N, const Dtype *target, Dtype *idx, const Dtype value) { hipLaunchKernelGGL(( find_kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, target, idx, value); } // Test Code int main() { vector<int> weight_shape = {2, 2, 2, 2}; shared_ptr <Blob<Dtype>> blob_a(new Blob<Dtype>(weight_shape)); shared_ptr <Blob<Dtype>> blob_b(new Blob<Dtype>(weight_shape)); set_values(blob_a->mutable_cpu_data(), {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3}, blob_a->count()); set_values(blob_b->mutable_cpu_data(), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, blob_b->count()); print_tensor("a", blob_a->cpu_data(), blob_a->shape()); print_tensor("b", blob_b->cpu_data(), blob_b->shape()); CHECK_EQ(blob_a->count(), blob_b->count()); caffe_gpu_find(blob_a->count(), blob_a->gpu_data(), blob_b->mutable_gpu_data(), 2); print_tensor("a", blob_a->cpu_data(), blob_a->shape()); print_tensor("b", blob_b->cpu_data(), blob_b->shape()); return 0; }
c45d0a35a99fc8f31a749ad3eee37d055a0a14fa.cu
// // Created by JEONGHYUNLEE on 2020/09/28. // #include <stdio.h> #include <caffe/caffe.hpp> #include "./common.hpp" using namespace caffe; typedef float Dtype; // Kernel Function __global__ void find_kernel(const int n, const Dtype *target, Dtype *idx, const Dtype value ) { CUDA_KERNEL_LOOP(index, n) { if (target[index] == value) { idx[index] = 1.0; } else { idx[index] = 0.0; } } } // Wrapper void caffe_gpu_find(const int N, const Dtype *target, Dtype *idx, const Dtype value) { find_kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(N, target, idx, value); } // Test Code int main() { vector<int> weight_shape = {2, 2, 2, 2}; shared_ptr <Blob<Dtype>> blob_a(new Blob<Dtype>(weight_shape)); shared_ptr <Blob<Dtype>> blob_b(new Blob<Dtype>(weight_shape)); set_values(blob_a->mutable_cpu_data(), {0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3}, blob_a->count()); set_values(blob_b->mutable_cpu_data(), {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, blob_b->count()); print_tensor("a", blob_a->cpu_data(), blob_a->shape()); print_tensor("b", blob_b->cpu_data(), blob_b->shape()); CHECK_EQ(blob_a->count(), blob_b->count()); caffe_gpu_find(blob_a->count(), blob_a->gpu_data(), blob_b->mutable_gpu_data(), 2); print_tensor("a", blob_a->cpu_data(), blob_a->shape()); print_tensor("b", blob_b->cpu_data(), blob_b->shape()); return 0; }
f40002799ba29b0e4164da4d1b9d25dadaec103b.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "ParamsMt.h" #include "Global.h" namespace cytonMt { ParamsMt::ParamsMt() { const Option options[] = { {"mode", "", "train/translate"}, {"saveModel", "", ""}, {"loadModel", "", "load model for continue training or translate"}, {"maxSaveModels", "10","maximum number of saved models"}, {"train", "trainSrc:trainTrg", "source-side and target-side training files, one sentences per line"}, {"dev", "devSrc:devTrg", "source-side and target-side development files, one sentences per line"}, {"testInput", "testInput", "input file for translating"}, {"testOutput", "testOutput", "output file for translating"}, {"vocab", "vocabSrc:vocabTrg", "source-side and target-side vocabulary files, one word per line"}, {"srcVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabSrc file"}, {"trgVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabTrg file"}, {"ignoreUnk", "1", "0/1, 1 means ignoring unknown words"}, {"initParam", "0.1", "initialize weights uniformly in (-initParam, initParam)"}, {"optimization", "SGD", "SGD/Adam"}, {"learningRate", "1", "learning rate"}, {"decayRate", "0.7", "decay factor of learning rate"}, {"decayStart", "1000", "learning rate start to decay from the epoch of decayStart"}, {"decayConti", "0", "0/1, 1 means that learning rate keeps decaying per check once it decays, OpenNMT's mode, "}, {"decayStatus", "0", "0/1, 1 means that learning rate is in a status of decaying, useful for continue training."}, {"epochs", "100", "max epochs of training"}, {"epochStart", "1", "the number of first epoch, useful for continue training"}, {"batchSize", "64", "batch size"}, {"maxSeqLen", "100", "max length of source and target sentence"}, {"hiddenSize", "512", "size of word embedding and hidden states"}, {"numLayers", "2", "number of encoder/decoder layers"}, {"dropout", "0.2", "dropout rate, 0 means disabling dropout"}, {"clipGradient", "5", "threshold for clip gradient"}, {"labelSmooth", "0.1", "factor of smoothing the target labels"}, {"probeFreq", "1", "number of times probing the development likelihood per epoch"}, {"probeMargin", "0.01", "margin for checking whether the development likelihood has increased"}, {"patience", "1", "threshold for decaying the learning rate and restart training from the best model"}, {"beamSize", "10", "size of beam search in translating"}, {"lenPenalty", "0.6", "length penalty"}, {"","",""} }; addOptions(options); } void ParamsMt::init_members() { mode=opt2val["--mode"]; saveModel=get("saveModel"); if(!saveModel.empty()) { saveModel+="/"; } loadModel=get("loadModel"); maxSaveModels=geti("maxSaveModels"); trainData=get("train"); devData=get("dev"); testInput=get("testInput"); testOutput=get("testOutput"); vector<string> ts; XLLib::str2list(get("vocab"),":", ts); if(!ts.empty()) { if(ts.size()!=2) { XLLib::printfln("the parameter of vocab is wrong: 
%s", get("vocab")); exit(1); } srcVocab=ts.at(0); trgVocab=ts.at(1); } srcVocabSize=geti("srcVocabSize"); trgVocabSize=geti("trgVocabSize"); ignoreUnk=geti("ignoreUnk"); initParam=getf("initParam"); optimization=get("optimization"); learningRate=getf("learningRate"); decayRate=getf("decayRate"); decayStart=getf("decayStart"); decayConti=geti("decayConti"); decayStatus=geti("decayStatus"); epochs=geti("epochs"); epochStart=geti("epochStart"); cytonLib::batchSize=geti("batchSize"); maxSeqLen=geti("maxSeqLen"); XLLib::str2ints(get("hiddenSize"), ":", hiddenSize); numLayers=geti("numLayers"); dropout=getf("dropout"); clipGradient=getf("clipGradient"); labelSmooth=getf("labelSmooth"); probeFreq=getf("probeFreq"); probeMargin=getf("probeMargin"); patience=geti("patience"); beamSize=geti("beamSize"); lenPenalty=getf("lenPenalty"); if(!loadModel.empty()) { XLLib::str2list(loadModel, ":", ts); string tModel=ts.at(0); int i=tModel.rfind("/"); string tDir=tModel.substr(0,i+1); string tFile=tDir+"/settings"; XLLib::printfln(os, "load arguments from %s", tFile.c_str()); loadModelParams(tFile); } } void ParamsMt::saveModelParams(std::string fileName) { std::ofstream f(fileName.c_str()); f<<numLayers<<"\n"; f<<XLLib::toString_vec_ostream(hiddenSize,":")<<"\n"; f<<srcVocabSize<<"\n"; f<<trgVocabSize<<"\n"; f<<cytonLib::batchSize<<"\n"; f<<maxSeqLen<<"\n"; f.close(); } void ParamsMt::loadModelParams(std::string fileName) { std::ifstream f(fileName.c_str()); string t; f>>numLayers; getline(f, t); if(t.empty()) { getline(f, t); } hiddenSize.clear(); XLLib::str2ints(t, ":", hiddenSize); getline(f, t); srcVocabSize=atoi(t.c_str()); getline(f, t); trgVocabSize=atoi(t.c_str()); f>>cytonLib::batchSize; int tn; f>>tn; maxSeqLen=::max(maxSeqLen, tn); f.close(); } ParamsMt params; }
f40002799ba29b0e4164da4d1b9d25dadaec103b.cu
/* Copyright 2018 XIAOLIN WANG ([email protected]; [email protected]) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "ParamsMt.h" #include "Global.h" namespace cytonMt { ParamsMt::ParamsMt() { const Option options[] = { {"mode", "", "train/translate"}, {"saveModel", "", ""}, {"loadModel", "", "load model for continue training or translate"}, {"maxSaveModels", "10","maximum number of saved models"}, {"train", "trainSrc:trainTrg", "source-side and target-side training files, one sentences per line"}, {"dev", "devSrc:devTrg", "source-side and target-side development files, one sentences per line"}, {"testInput", "testInput", "input file for translating"}, {"testOutput", "testOutput", "output file for translating"}, {"vocab", "vocabSrc:vocabTrg", "source-side and target-side vocabulary files, one word per line"}, {"srcVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabSrc file"}, {"trgVocabSize", "0", "size of source-side vocabulary, 0 means using whole vocabulary in vocabTrg file"}, {"ignoreUnk", "1", "0/1, 1 means ignoring unknown words"}, {"initParam", "0.1", "initialize weights uniformly in (-initParam, initParam)"}, {"optimization", "SGD", "SGD/Adam"}, {"learningRate", "1", "learning rate"}, {"decayRate", "0.7", "decay factor of learning rate"}, {"decayStart", "1000", "learning rate start to decay from the epoch of decayStart"}, {"decayConti", "0", "0/1, 1 means that learning rate keeps decaying per check once it decays, OpenNMT's mode, "}, {"decayStatus", "0", "0/1, 1 means that learning rate is in a status of decaying, useful for continue training."}, {"epochs", "100", "max epochs of training"}, {"epochStart", "1", "the number of first epoch, useful for continue training"}, {"batchSize", "64", "batch size"}, {"maxSeqLen", "100", "max length of source and target sentence"}, {"hiddenSize", "512", "size of word embedding and hidden states"}, {"numLayers", "2", "number of encoder/decoder layers"}, {"dropout", "0.2", "dropout rate, 0 means disabling dropout"}, {"clipGradient", "5", "threshold for clip gradient"}, {"labelSmooth", "0.1", "factor of smoothing the target labels"}, {"probeFreq", "1", "number of times probing the development likelihood per epoch"}, {"probeMargin", "0.01", "margin for checking whether the development likelihood has increased"}, {"patience", "1", "threshold for decaying the learning rate and restart training from the best model"}, {"beamSize", "10", "size of beam search in translating"}, {"lenPenalty", "0.6", "length penalty"}, {"","",""} }; addOptions(options); } void ParamsMt::init_members() { mode=opt2val["--mode"]; saveModel=get("saveModel"); if(!saveModel.empty()) { saveModel+="/"; } loadModel=get("loadModel"); maxSaveModels=geti("maxSaveModels"); trainData=get("train"); devData=get("dev"); testInput=get("testInput"); testOutput=get("testOutput"); vector<string> ts; XLLib::str2list(get("vocab"),":", ts); if(!ts.empty()) { if(ts.size()!=2) { XLLib::printfln("the parameter of vocab is wrong: %s", get("vocab")); exit(1); } srcVocab=ts.at(0); 
trgVocab=ts.at(1); } srcVocabSize=geti("srcVocabSize"); trgVocabSize=geti("trgVocabSize"); ignoreUnk=geti("ignoreUnk"); initParam=getf("initParam"); optimization=get("optimization"); learningRate=getf("learningRate"); decayRate=getf("decayRate"); decayStart=getf("decayStart"); decayConti=geti("decayConti"); decayStatus=geti("decayStatus"); epochs=geti("epochs"); epochStart=geti("epochStart"); cytonLib::batchSize=geti("batchSize"); maxSeqLen=geti("maxSeqLen"); XLLib::str2ints(get("hiddenSize"), ":", hiddenSize); numLayers=geti("numLayers"); dropout=getf("dropout"); clipGradient=getf("clipGradient"); labelSmooth=getf("labelSmooth"); probeFreq=getf("probeFreq"); probeMargin=getf("probeMargin"); patience=geti("patience"); beamSize=geti("beamSize"); lenPenalty=getf("lenPenalty"); if(!loadModel.empty()) { XLLib::str2list(loadModel, ":", ts); string tModel=ts.at(0); int i=tModel.rfind("/"); string tDir=tModel.substr(0,i+1); string tFile=tDir+"/settings"; XLLib::printfln(os, "load arguments from %s", tFile.c_str()); loadModelParams(tFile); } } void ParamsMt::saveModelParams(std::string fileName) { std::ofstream f(fileName.c_str()); f<<numLayers<<"\n"; f<<XLLib::toString_vec_ostream(hiddenSize,":")<<"\n"; f<<srcVocabSize<<"\n"; f<<trgVocabSize<<"\n"; f<<cytonLib::batchSize<<"\n"; f<<maxSeqLen<<"\n"; f.close(); } void ParamsMt::loadModelParams(std::string fileName) { std::ifstream f(fileName.c_str()); string t; f>>numLayers; getline(f, t); if(t.empty()) { getline(f, t); } hiddenSize.clear(); XLLib::str2ints(t, ":", hiddenSize); getline(f, t); srcVocabSize=atoi(t.c_str()); getline(f, t); trgVocabSize=atoi(t.c_str()); f>>cytonLib::batchSize; int tn; f>>tn; maxSeqLen=std::max(maxSeqLen, tn); f.close(); } ParamsMt params; }
8bd12e0cd2ca21e95f71cb711c7fb8e5370c794c.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 2018-2023 by XGBoost Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()), static_cast<bst_uint>(column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, 
tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(gbm::GBLinearModel *model) { for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair{g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue}; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
8bd12e0cd2ca21e95f71cb711c7fb8e5370c794c.cu
/** * Copyright 2018-2023 by XGBoost Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back(static_cast<bst_uint>(column_begin - col.cbegin()), static_cast<bst_uint>(column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); 
for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(gbm::GBLinearModel *model) { for (uint32_t group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair{g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue}; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
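// Hedged sketch, not from the XGBoost sources above: the updater folds each
// sparse column into one (G, H) pair via a thrust transform-reduce, where every
// Entry contributes (g * x, h * x * x).  The stand-alone kernel below redoes that
// reduction with a plain atomicAdd so the arithmetic is visible in isolation;
// ColumnEntry, GradPair and accumulate_column are illustrative names only.
struct ColumnEntry { int row; float fvalue; };   // sparse (row, value) of one feature column
struct GradPair    { float grad; float hess; };  // per-row gradient / hessian

__global__ void accumulate_column(const ColumnEntry* col, int col_size,
                                  const GradPair* gpair,
                                  float* G, float* H) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= col_size) return;
  GradPair g = gpair[col[i].row];
  // Same per-entry contribution as GetGradient(): (g * x, h * x^2).
  atomicAdd(G, g.grad * col[i].fvalue);
  atomicAdd(H, g.hess * col[i].fvalue * col[i].fvalue);
}
// The host side then applies the reduced pair the way UpdateFeature() does:
//   dw = learning_rate * CoordinateDelta(G, H, w, reg_alpha, reg_lambda);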
33379c44f9b56f14224e863fd224d73f491a20c2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <time.h>
#include "vec3.h"
#include "ray.h"

// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )

void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
    if (result) {
        std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
            file << ":" << line << " '" << func << "' \n";
        // Make sure we call CUDA Device Reset before exiting
        hipDeviceReset();
        exit(99);
    }
}

__device__ bool hit_sphere(const vec3& center, float radius, const ray& r) {
    vec3 oc = r.origin() - center;
    float a = dot(r.direction(), r.direction());
    float b = 2.0f * dot(oc, r.direction());
    float c = dot(oc, oc) - radius*radius;
    float discriminant = b*b - 4.0f*a*c;
    return (discriminant > 0.0f);
}

__device__ vec3 color(const ray& r) {
    if (hit_sphere(vec3(0,0,-1), 0.5, r))
        return vec3(1,0,0);
    vec3 unit_direction = unit_vector(r.direction());
    float t = 0.5f*(unit_direction.y() + 1.0f);
    return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
}

__global__ void render(vec3 *fb, int max_x, int max_y, vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j*max_x + i;
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    ray r(origin, lower_left_corner + u*horizontal + v*vertical);
    fb[pixel_index] = color(r);
}

int main() {
    int nx = 1200;
    int ny = 600;
    int tx = 8;
    int ty = 8;

    std::cerr << "Rendering a " << nx << "x" << ny << " image ";
    std::cerr << "in " << tx << "x" << ty << " blocks.\n";

    int num_pixels = nx*ny;
    size_t fb_size = num_pixels*sizeof(vec3);

    // allocate FB
    vec3 *fb;
    checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));

    clock_t start, stop;
    start = clock();
    // Render our buffer
    dim3 blocks(nx/tx+1,ny/ty+1);
    dim3 threads(tx,ty);
    hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny,
                        vec3(-2.0, -1.0, -1.0),
                        vec3(4.0, 0.0, 0.0),
                        vec3(0.0, 2.0, 0.0),
                        vec3(0.0, 0.0, 0.0));
    checkCudaErrors(hipGetLastError());
    checkCudaErrors(hipDeviceSynchronize());
    stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";

    // Output FB as Image
    std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    for (int j = ny-1; j >= 0; j--) {
        for (int i = 0; i < nx; i++) {
            size_t pixel_index = j*nx + i;
            int ir = int(255.99*fb[pixel_index].r());
            int ig = int(255.99*fb[pixel_index].g());
            int ib = int(255.99*fb[pixel_index].b());
            std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }

    checkCudaErrors(hipFree(fb));
}
33379c44f9b56f14224e863fd224d73f491a20c2.cu
#include <iostream>
#include <time.h>
#include "vec3.h"
#include "ray.h"

// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )

void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
    if (result) {
        std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
            file << ":" << line << " '" << func << "' \n";
        // Make sure we call CUDA Device Reset before exiting
        cudaDeviceReset();
        exit(99);
    }
}

__device__ bool hit_sphere(const vec3& center, float radius, const ray& r) {
    vec3 oc = r.origin() - center;
    float a = dot(r.direction(), r.direction());
    float b = 2.0f * dot(oc, r.direction());
    float c = dot(oc, oc) - radius*radius;
    float discriminant = b*b - 4.0f*a*c;
    return (discriminant > 0.0f);
}

__device__ vec3 color(const ray& r) {
    if (hit_sphere(vec3(0,0,-1), 0.5, r))
        return vec3(1,0,0);
    vec3 unit_direction = unit_vector(r.direction());
    float t = 0.5f*(unit_direction.y() + 1.0f);
    return (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
}

__global__ void render(vec3 *fb, int max_x, int max_y, vec3 lower_left_corner, vec3 horizontal, vec3 vertical, vec3 origin) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j*max_x + i;
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    ray r(origin, lower_left_corner + u*horizontal + v*vertical);
    fb[pixel_index] = color(r);
}

int main() {
    int nx = 1200;
    int ny = 600;
    int tx = 8;
    int ty = 8;

    std::cerr << "Rendering a " << nx << "x" << ny << " image ";
    std::cerr << "in " << tx << "x" << ty << " blocks.\n";

    int num_pixels = nx*ny;
    size_t fb_size = num_pixels*sizeof(vec3);

    // allocate FB
    vec3 *fb;
    checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));

    clock_t start, stop;
    start = clock();
    // Render our buffer
    dim3 blocks(nx/tx+1,ny/ty+1);
    dim3 threads(tx,ty);
    render<<<blocks, threads>>>(fb, nx, ny,
                                vec3(-2.0, -1.0, -1.0),
                                vec3(4.0, 0.0, 0.0),
                                vec3(0.0, 2.0, 0.0),
                                vec3(0.0, 0.0, 0.0));
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";

    // Output FB as Image
    std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    for (int j = ny-1; j >= 0; j--) {
        for (int i = 0; i < nx; i++) {
            size_t pixel_index = j*nx + i;
            int ir = int(255.99*fb[pixel_index].r());
            int ig = int(255.99*fb[pixel_index].g());
            int ib = int(255.99*fb[pixel_index].b());
            std::cout << ir << " " << ig << " " << ib << "\n";
        }
    }

    checkCudaErrors(cudaFree(fb));
}
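// Hedged sketch, not from the original pair above: the .hip and .cu versions of
// this renderer differ almost only in launch syntax and API prefix --
// render<<<blocks, threads>>>(...) becomes hipLaunchKernelGGL(( render), dim3(blocks),
// dim3(threads), 0, 0, ...), and cudaX runtime calls become hipX calls.  The minimal
// CUDA pattern the pair is built around looks like this; `fill` and `launch_fill`
// are placeholder names, not part of the dataset.
__global__ void fill(float* out, int n, float v) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = v;                      // one thread per element, guarded
}

void launch_fill(float* out, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);     // round up; the file above uses the simpler nx/tx+1
    fill<<<grid, block>>>(out, n, 1.0f);        // HIP: hipLaunchKernelGGL(( fill), grid, block, 0, 0, out, n, 1.0f);
    cudaGetLastError();                         // HIP: hipGetLastError()
    cudaDeviceSynchronize();                    // HIP: hipDeviceSynchronize()
}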
46cb19e812d0211b2a72fde7873cd9ef22423cf9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 07.03.2019 // #include <ops/declarable/helpers/gather.h> #include <numeric> #include <PointersManager.h> #include <ShapeUtils.h> namespace nd4j { namespace ops { namespace helpers { template<typename X, typename Y> __global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { __shared__ const X* x; __shared__ const Y* y; __shared__ X* z; __shared__ Nd4jLong xLen, yLen, zLen; if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx); z = reinterpret_cast<X*>(vz); y = reinterpret_cast<const Y *>(vy); xLen = shape::length(xShapeInfo); yLen = shape::length(yShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); //const Nd4jLong zLen = shape::length(zShapeInfo); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int j = start; j < zLen; j += step) { auto zIndex = shape::getIndexOffset(j, zShapeInfo); auto yIndex = shape::getIndexOffset(j, yShapeInfo); auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo); z[zIndex] = x[xIndex]; } } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void gatherCuda(const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { const Y* y = reinterpret_cast<const Y*>(vy); __shared__ const X* x; __shared__ X* z; const Nd4jLong len = shape::length(xShapeInfo); //const Nd4jLong zLen = shape::length(zShapeInfo); for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) { if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo)]]; z = reinterpret_cast<X*>(vz) + zOffsets[i]; } __syncthreads(); for (int j = threadIdx.x; j < len; j += blockDim.x) { auto zIndex = shape::getIndexOffset(j, zShapeInfo); auto xIndex = shape::getIndexOffset(j, xShapeInfo); z[zIndex] = x[xIndex]; } __syncthreads(); } } template<typename X, typename Y> __host__ static void gatherCudaLinear(const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( gatherCudaLinearKernel<X,Y>), dim3(128), dim3(256), 1024, *stream, vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __host__ static void gatherCudaLauncher(const hipStream_t *stream, const int numOfSubArrs, const void* vx, const 
Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { hipLaunchKernelGGL(( gatherCuda<X,Y>), dim3(numOfSubArrs), dim3(MAX_NUM_THREADS), 1024, *stream, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////// void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) { const int inputRank = input->rankOf(); const int numOfIntArgs = intArgs.size(); int axis = numOfIntArgs > 0 ? intArgs[0] : 0; if(axis < 0) axis += inputRank; if (indices == nullptr && numOfIntArgs == 2) { // scalar case output->assign((*input)(intArgs[1], {axis})); } else if (indices != nullptr && indices->isScalar()) { if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar auto idx = indices->e<Nd4jLong>(0); auto scalarNDArray = input->e(idx); output->assign(scalarNDArray); } else { NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis}); output->assign(inSubArr); } } else { NDArray* pIndices = const_cast<NDArray*>(indices); if(indices == nullptr) pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext()); std::vector<int> dimsOut(pIndices->rankOf()); std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1 const Nd4jLong numOfSubArrs = pIndices->lengthOf(); Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr); input-> getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets); output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets); if (output->rankOf() > 1) { PointersManager manager(context, "gather"); auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo, shape::shapeInfoByteLength( inSubArrShapeInfo))); auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo, shape::shapeInfoByteLength( outSubArrShapeInfo))); auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() / shape::length( inSubArrShapeInfo)) * sizeof(Nd4jLong))); auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets, (output->lengthOf() / shape::length(outSubArrShapeInfo)) * sizeof(Nd4jLong))); NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLauncher, (context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); manager.synchronize(); } else { NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLinear, (context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); } 
if(indices == nullptr) delete pIndices; } } } } }
46cb19e812d0211b2a72fde7873cd9ef22423cf9.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // @author Yurii Shyrma ([email protected]), created on 07.03.2019 // #include <ops/declarable/helpers/gather.h> #include <numeric> #include <PointersManager.h> #include <ShapeUtils.h> namespace nd4j { namespace ops { namespace helpers { template<typename X, typename Y> __global__ static void gatherCudaLinearKernel(const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { __shared__ const X* x; __shared__ const Y* y; __shared__ X* z; __shared__ Nd4jLong xLen, yLen, zLen; if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx); z = reinterpret_cast<X*>(vz); y = reinterpret_cast<const Y *>(vy); xLen = shape::length(xShapeInfo); yLen = shape::length(yShapeInfo); zLen = shape::length(zShapeInfo); } __syncthreads(); //const Nd4jLong zLen = shape::length(zShapeInfo); auto start = blockIdx.x * blockDim.x + threadIdx.x; auto step = blockDim.x * gridDim.x; for (int j = start; j < zLen; j += step) { auto zIndex = shape::getIndexOffset(j, zShapeInfo); auto yIndex = shape::getIndexOffset(j, yShapeInfo); auto xIndex = shape::getIndexOffset(y[yIndex], xShapeInfo); z[zIndex] = x[xIndex]; } } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __global__ static void gatherCuda(const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const Nd4jLong* zOffsets) { const Y* y = reinterpret_cast<const Y*>(vy); __shared__ const X* x; __shared__ X* z; const Nd4jLong len = shape::length(xShapeInfo); //const Nd4jLong zLen = shape::length(zShapeInfo); for (int i = blockIdx.x; i < numOfSubArrs; i += gridDim.x) { if (threadIdx.x == 0) { x = reinterpret_cast<const X*>(vx) + xOffsets[y[shape::getIndexOffset(i, yShapeInfo)]]; z = reinterpret_cast<X*>(vz) + zOffsets[i]; } __syncthreads(); for (int j = threadIdx.x; j < len; j += blockDim.x) { auto zIndex = shape::getIndexOffset(j, zShapeInfo); auto xIndex = shape::getIndexOffset(j, xShapeInfo); z[zIndex] = x[xIndex]; } __syncthreads(); } } template<typename X, typename Y> __host__ static void gatherCudaLinear(const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* zShapeInfo) { gatherCudaLinearKernel<X,Y><<<128, 256, 1024, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz, zShapeInfo); } ////////////////////////////////////////////////////////////////////// template<typename X, typename Y> __host__ static void gatherCudaLauncher(const cudaStream_t *stream, const int numOfSubArrs, const void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, const void* vy, const Nd4jLong* yShapeInfo, void* vz, const Nd4jLong* 
zShapeInfo, const Nd4jLong* zOffsets) { gatherCuda<X,Y><<<numOfSubArrs, MAX_NUM_THREADS, 1024, *stream>>>(numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, vz, zShapeInfo, zOffsets); } ////////////////////////////////////////////////////////////////////// void gather(nd4j::LaunchContext * context, const NDArray* input, const NDArray* indices, NDArray* output, const std::vector<int>& intArgs) { const int inputRank = input->rankOf(); const int numOfIntArgs = intArgs.size(); int axis = numOfIntArgs > 0 ? intArgs[0] : 0; if(axis < 0) axis += inputRank; if (indices == nullptr && numOfIntArgs == 2) { // scalar case output->assign((*input)(intArgs[1], {axis})); } else if (indices != nullptr && indices->isScalar()) { if(input->rankOf() <= 1) { //For scalar indices, rank 0 or 1 input: can't do tensor along dimension 0 as this is whole array... instead, we want to get a scalar auto idx = indices->e<Nd4jLong>(0); auto scalarNDArray = input->e(idx); output->assign(scalarNDArray); } else { NDArray inSubArr = (*input)(indices->e<Nd4jLong>(0), {axis}); output->assign(inSubArr); } } else { NDArray* pIndices = const_cast<NDArray*>(indices); if(indices == nullptr) pIndices = new NDArray(input->ordering(), {numOfIntArgs-1}, std::vector<double>(intArgs.begin() + 1, intArgs.end()), DataType::INT64, input->getContext()); std::vector<int> dimsOut(pIndices->rankOf()); std::iota(dimsOut.begin(), dimsOut.end(), axis); // fill with axis, axis+1, ... axis+pIndices->rankOf()-1 const Nd4jLong numOfSubArrs = pIndices->lengthOf(); Nd4jLong *outSubArrShapeInfo(nullptr), *inSubArrShapeInfo(nullptr), *outSubArrOffsets(nullptr), *inSubArrOffsets(nullptr); input-> getSubArrShapeAndOffsets({axis}, inSubArrShapeInfo, inSubArrOffsets); output->getSubArrShapeAndOffsets(dimsOut, outSubArrShapeInfo, outSubArrOffsets); if (output->rankOf() > 1) { PointersManager manager(context, "gather"); auto xShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrShapeInfo, shape::shapeInfoByteLength( inSubArrShapeInfo))); auto zShapeInfo = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrShapeInfo, shape::shapeInfoByteLength( outSubArrShapeInfo))); auto xOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(inSubArrOffsets, (input->lengthOf() / shape::length( inSubArrShapeInfo)) * sizeof(Nd4jLong))); auto zOffsets = reinterpret_cast<Nd4jLong *>(manager.replicatePointer(outSubArrOffsets, (output->lengthOf() / shape::length(outSubArrShapeInfo)) * sizeof(Nd4jLong))); NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLauncher, (context->getCudaStream(), numOfSubArrs, input->getSpecialBuffer(), xShapeInfo, xOffsets, pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->getSpecialBuffer(), zShapeInfo, zOffsets), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); manager.synchronize(); } else { NDArray::prepareSpecialUse({output}, {input, pIndices}); BUILD_DOUBLE_SELECTOR(input->dataType(), pIndices->dataType(), gatherCudaLinear, (context->getCudaStream(), input->getSpecialBuffer(), input->getSpecialShapeInfo(), pIndices->getSpecialBuffer(), pIndices->getSpecialShapeInfo(), output->specialBuffer(), output->specialShapeInfo()), LIBND4J_TYPES, INDEXING_TYPES); NDArray::registerSpecialUse({output}, {input, pIndices}); } if(indices == nullptr) delete pIndices; } } } } }
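// Hedged sketch, not from the libnd4j sources above: stripped of the Nd4jLong
// shape/offset bookkeeping, gatherCudaLinearKernel is a plain index lookup,
// z[j] = x[y[j]], walked with a grid-stride loop.  The simplified kernel below
// shows just that core; gather_linear and its parameter names are illustrative.
__global__ void gather_linear(const float* x, const int* indices,
                              float* z, int z_len) {
  // Grid-stride loop, same traversal pattern as the kernel above.
  for (int j = blockIdx.x * blockDim.x + threadIdx.x; j < z_len;
       j += blockDim.x * gridDim.x) {
    z[j] = x[indices[j]];   // shape::getIndexOffset() reduces to this for flat, contiguous buffers
  }
}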
45f71e4117ad1a15f600b2b2a7922823b15a2811.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * http://github.com/dusty-nv/jetson-inference */ #include "cudaFont.h" #include "cudaMappedMemory.h" #include "loadImage.h" // constructor cudaFont::cudaFont() { mCommandCPU = NULL; mCommandGPU = NULL; mCmdEntries = 0; mFontMapCPU = NULL; mFontMapGPU = NULL; mFontMapWidth = 0; mFontMapHeight = 0; mFontCellSize = make_int2(24,32); } // destructor cudaFont::~cudaFont() { if( mFontMapCPU != NULL ) { CUDA(hipHostFree(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; } } // Create cudaFont* cudaFont::Create( const char* bitmap_path ) { cudaFont* c = new cudaFont(); if( !c ) return NULL; if( !c->init(bitmap_path) ) return NULL; return c; } // init bool cudaFont::init( const char* bitmap_path ) { if( !loadImageRGBA(bitmap_path, &mFontMapCPU, &mFontMapGPU, &mFontMapWidth, &mFontMapHeight) ) return false; if( !cudaAllocMapped((void**)&mCommandCPU, (void**)&mCommandGPU, sizeof(short4) * MaxCommands) ) return false; return true; } inline __host__ __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } template<typename T> __global__ void gpuOverlayText( T* font, int fontWidth, short4* text, T* output, int width, int height, float4 color ) { const short4 t = text[blockIdx.x]; //printf("%i %hi %hi %hi %hi\n", blockIdx.x, t.x, t.y, t.z, t.w); const int x = t.x + threadIdx.x; const int y = t.y + threadIdx.y; if( x < 0 || y < 0 || x >= width || y >= height ) return; const int u = t.z + threadIdx.x; const int v = t.w + threadIdx.y; //printf("%i %i %i %i %i\n", blockIdx.x, x, y, u, v); const T px_font = font[v * fontWidth + u] * color; T px_out = output[y * width + x]; // fixme: add proper input support const float alpha = px_font.w / 255.0f; const float ialph = 1.0f - alpha; px_out.x = alpha * px_font.x + ialph * px_out.x; px_out.y = alpha * px_font.y + ialph * px_out.y; px_out.z = alpha * px_font.z + ialph * px_out.z; output[y * width + x] = px_out; } // processCUDA template<typename T> hipError_t cudaOverlayText( T* font, const int2& fontCellSize, size_t fontMapWidth, const float4& fontColor, short4* text, size_t length, T* output, size_t width, size_t height) { if( !font || !text || !output || length == 0 || width == 0 || height == 0 ) return hipErrorInvalidValue; const float4 color_scale = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f ); // setup arguments const dim3 block(fontCellSize.x, fontCellSize.y); const dim3 grid(length); hipLaunchKernelGGL(( gpuOverlayText), dim3(grid), dim3(block), 0, 0, font, fontMapWidth, text, output, width, height, color_scale); return hipGetLastError(); } // RenderOverlay bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& text, const float4& color ) { if( !input || !output || width == 0 || height == 0 || text.size() == 0 ) return false; const uint32_t cellsPerRow = mFontMapWidth / mFontCellSize.x; const uint32_t numText = text.size(); for( uint32_t t=0; t < numText; t++ ) { const uint32_t numChars = text[t].first.size(); int2 pos = text[t].second; for( uint32_t n=0; n < numChars; n++ ) { char c = text[t].first[n]; if( c < 32 || c > 126 ) continue; c -= 32; const uint32_t font_y = c / cellsPerRow; const uint32_t font_x = c - (font_y * cellsPerRow); mCommandCPU[mCmdEntries++] = make_short4( pos.x, pos.y, font_x * (mFontCellSize.x + 1), font_y * (mFontCellSize.y + 1) ); 
pos.x += mFontCellSize.x; } } CUDA(cudaOverlayText<float4>( mFontMapGPU, mFontCellSize, mFontMapWidth, color, mCommandGPU, mCmdEntries, output, width, height)); mCmdEntries = 0; return true; } bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const char* str, int x, int y, const float4& color ) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) )); return RenderOverlay(input, output, width, height, list, color); }
45f71e4117ad1a15f600b2b2a7922823b15a2811.cu
/* * http://github.com/dusty-nv/jetson-inference */ #include "cudaFont.h" #include "cudaMappedMemory.h" #include "loadImage.h" // constructor cudaFont::cudaFont() { mCommandCPU = NULL; mCommandGPU = NULL; mCmdEntries = 0; mFontMapCPU = NULL; mFontMapGPU = NULL; mFontMapWidth = 0; mFontMapHeight = 0; mFontCellSize = make_int2(24,32); } // destructor cudaFont::~cudaFont() { if( mFontMapCPU != NULL ) { CUDA(cudaFreeHost(mFontMapCPU)); mFontMapCPU = NULL; mFontMapGPU = NULL; } } // Create cudaFont* cudaFont::Create( const char* bitmap_path ) { cudaFont* c = new cudaFont(); if( !c ) return NULL; if( !c->init(bitmap_path) ) return NULL; return c; } // init bool cudaFont::init( const char* bitmap_path ) { if( !loadImageRGBA(bitmap_path, &mFontMapCPU, &mFontMapGPU, &mFontMapWidth, &mFontMapHeight) ) return false; if( !cudaAllocMapped((void**)&mCommandCPU, (void**)&mCommandGPU, sizeof(short4) * MaxCommands) ) return false; return true; } inline __host__ __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w); } template<typename T> __global__ void gpuOverlayText( T* font, int fontWidth, short4* text, T* output, int width, int height, float4 color ) { const short4 t = text[blockIdx.x]; //printf("%i %hi %hi %hi %hi\n", blockIdx.x, t.x, t.y, t.z, t.w); const int x = t.x + threadIdx.x; const int y = t.y + threadIdx.y; if( x < 0 || y < 0 || x >= width || y >= height ) return; const int u = t.z + threadIdx.x; const int v = t.w + threadIdx.y; //printf("%i %i %i %i %i\n", blockIdx.x, x, y, u, v); const T px_font = font[v * fontWidth + u] * color; T px_out = output[y * width + x]; // fixme: add proper input support const float alpha = px_font.w / 255.0f; const float ialph = 1.0f - alpha; px_out.x = alpha * px_font.x + ialph * px_out.x; px_out.y = alpha * px_font.y + ialph * px_out.y; px_out.z = alpha * px_font.z + ialph * px_out.z; output[y * width + x] = px_out; } // processCUDA template<typename T> cudaError_t cudaOverlayText( T* font, const int2& fontCellSize, size_t fontMapWidth, const float4& fontColor, short4* text, size_t length, T* output, size_t width, size_t height) { if( !font || !text || !output || length == 0 || width == 0 || height == 0 ) return cudaErrorInvalidValue; const float4 color_scale = make_float4( fontColor.x / 255.0f, fontColor.y / 255.0f, fontColor.z / 255.0f, fontColor.w / 255.0f ); // setup arguments const dim3 block(fontCellSize.x, fontCellSize.y); const dim3 grid(length); gpuOverlayText<<<grid, block>>>(font, fontMapWidth, text, output, width, height, color_scale); return cudaGetLastError(); } // RenderOverlay bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const std::vector< std::pair< std::string, int2 > >& text, const float4& color ) { if( !input || !output || width == 0 || height == 0 || text.size() == 0 ) return false; const uint32_t cellsPerRow = mFontMapWidth / mFontCellSize.x; const uint32_t numText = text.size(); for( uint32_t t=0; t < numText; t++ ) { const uint32_t numChars = text[t].first.size(); int2 pos = text[t].second; for( uint32_t n=0; n < numChars; n++ ) { char c = text[t].first[n]; if( c < 32 || c > 126 ) continue; c -= 32; const uint32_t font_y = c / cellsPerRow; const uint32_t font_x = c - (font_y * cellsPerRow); mCommandCPU[mCmdEntries++] = make_short4( pos.x, pos.y, font_x * (mFontCellSize.x + 1), font_y * (mFontCellSize.y + 1) ); pos.x += mFontCellSize.x; } } CUDA(cudaOverlayText<float4>( mFontMapGPU, mFontCellSize, mFontMapWidth, color, mCommandGPU, 
mCmdEntries, output, width, height)); mCmdEntries = 0; return true; } bool cudaFont::RenderOverlay( float4* input, float4* output, uint32_t width, uint32_t height, const char* str, int x, int y, const float4& color ) { if( !str ) return NULL; std::vector< std::pair< std::string, int2 > > list; list.push_back( std::pair< std::string, int2 >( str, make_int2(x,y) )); return RenderOverlay(input, output, width, height, list, color); }
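// Hedged sketch, not from the jetson-inference sources above: gpuOverlayText()
// composites each glyph texel over the frame with a standard "source over"
// blend, using the font alpha channel normalised to [0,1].  The per-channel
// arithmetic in isolation (blend_over is an illustrative name):
__host__ __device__ inline float blend_over(float font_c, float out_c, float font_a) {
    float alpha = font_a / 255.0f;              // px_font.w / 255.0f in the kernel
    return alpha * font_c + (1.0f - alpha) * out_c;
}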
97a1013a140acc19936b5e2135a849dce07f10dc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(gradInput[argmax]), z); } } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( __func__, {output_arg, indices_arg, input_arg}); if (input.numel() == 0) { return; } int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { 
scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel hipLaunchKernelGGL(( adaptivemaxpool), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda) (const Tensor& gradOutput, const Tensor& input, const Tensor& indices, const Tensor& gradInput) { globalContext().alertNotDeterministic( "adaptive_max_pool2d_backward_cuda"); TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput, "gradOutput", 2}; TensorArg input_arg{input, "input", 3}; TensorArg indices_arg{indices, "indices", 4}; checkAllSameGPU( __func__, {grad_input_arg, grad_output_arg, input_arg, indices_arg}); if (gradOutput.numel() == 0) { return; } bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput_ = gradOutput.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput_.size(1); int64_t osizeW = gradOutput_.size(2); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput_.size(2); int64_t osizeW = gradOutput_.size(3); gradInput.zero_(); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( atomicadaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically hipLaunchKernelGGL(( adaptivemaxgradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_HIP_KERNEL_LAUNCH_CHECK(); } }); } } } // at::native } // at
97a1013a140acc19936b5e2135a849dce07f10dc.cu
#include <ATen/ATen.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { namespace { __device__ inline int start_index(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); } __device__ inline int end_index(int a, int b, int c) { return (int)std::ceil((float)((a + 1) * c) / b); } // 4d tensor B x D x H x W /* * Description: * this function adaptively maxpools an input 4D tensor along dimensions 2 and 3 * 4D input, 4D output, 4D argmax x and y */ template <typename T> __global__ void adaptivemaxpool(T *input, T *output, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW, int64_t istrideD, int64_t istrideH, int64_t istrideW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; const int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; const int ostepH = blockDim.y*gridDim.y; // select input/output plane output = output + o_plane*osizeH*osizeW; input = input + i_plane*istrideD; indices = indices + o_plane*osizeH*osizeW; // For all output pixels... for(oh = ostartH; oh < oendH; oh += ostepH) { int istartH = start_index(oh, osizeH, isizeH); int iendH = end_index(oh, osizeH, isizeH); int kH = iendH - istartH; for(ow = ostartW; ow < oendW; ow += ostepW) { int istartW = start_index(ow, osizeW, isizeW); int iendW = end_index(ow, osizeW, isizeW); int kW = iendW - istartW; // Compute the mean of the input image... 
T *ptr_input = input + istartH*istrideH + istartW*istrideW; T *ptr_output = output + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; int argmax = istartH * isizeW + istartW; T max = at::numeric_limits<T>::lower_bound(); // -Infinity int ih, iw; for(ih = 0; ih < kH; ih++) { for(iw = 0; iw < kW; iw++) { T val = ptr_input[iw*istrideW]; if ((val > max) || THCNumerics<T>::isnan(val)) { max = val; argmax = (ih+istartH)*isizeW + iw+istartW; } } ptr_input += istrideH; // next input line } // Update output and argmax *ptr_output = max; *ptr_ind = argmax; } } } /* * Description: * this function computes the gradInput from weight and gradOutput */ template <typename T> __global__ void adaptivemaxgradinput(T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; //int k = blockIdx.x % sizeD; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); gradInput[argmax] += z; } } } /* * Description: * this function computes the gradInput from weight and gradOutput * when kH != dH or kW != dW (uses atomic add) */ template <typename T> __global__ void atomicadaptivemaxgradinput( T *gradInput, T *gradOutput, int64_t *indices, int isizeH, int isizeW, int osizeH, int osizeW ) { // iterators int oh, ow; // compute offsets based on thread/block ID int o_plane = blockIdx.x; int i_plane = o_plane; int ostartW = threadIdx.x; int oendW = osizeW; int ostepW = blockDim.x; int ostartH = blockDim.y*blockIdx.y + threadIdx.y; int oendH = osizeH; int ostepH = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o_plane*osizeH*osizeW; gradInput = gradInput + i_plane*isizeH*isizeW; indices = indices + o_plane*osizeH*osizeW; // compute gradInput for(oh = ostartH; oh < oendH; oh += ostepH) { for(ow = ostartW; ow < oendW; ow += ostepW) { T *ptr_gradOutput = gradOutput + oh*osizeW + ow; int64_t *ptr_ind = indices + oh*osizeW + ow; T z = *ptr_gradOutput; int argmax = (*ptr_ind); // atomic add since different threads could update same variable gpuAtomicAddNoReturn(&(gradInput[argmax]), z); } } } } // namespace // 4d tensor B x D x H x W TORCH_IMPL_FUNC(adaptive_max_pool2d_out_cuda) (const Tensor& input, IntArrayRef output_size, const Tensor& output, const Tensor& indices) { TensorArg output_arg{output, "output", 1}; TensorArg indices_arg{indices, "indices", 2}; TensorArg input_arg{input, "input", 3}; checkAllSameGPU( __func__, {output_arg, indices_arg, input_arg}); if (input.numel() == 0) { return; } int64_t osizeH = output_size[0]; int64_t osizeW = output_size[1]; if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t istrideD = input.stride(0); int64_t istrideH = input.stride(1); int64_t istrideW = input.stride(2); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_cuda", [&] { 
scalar_t* input_data = input.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else { Tensor input_ = input.contiguous(); int64_t sizeB = input_.size(0); int64_t sizeD = input_.size(1); int64_t isizeH = input_.size(2); int64_t isizeW = input_.size(3); int64_t istrideD = input_.stride(1); int64_t istrideH = input_.stride(2); int64_t istrideW = input_.stride(3); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input_.scalar_type(), "adaptive_max_pool2d_cuda", [&] { scalar_t* input_data = input_.data_ptr<scalar_t>(); scalar_t* output_data = output.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); // run maxpool kernel adaptivemaxpool<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( input_data, output_data, indices_data, isizeH, isizeW, osizeH, osizeW, istrideD, istrideH, istrideW); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } TORCH_IMPL_FUNC(adaptive_max_pool2d_backward_out_cuda) (const Tensor& gradOutput, const Tensor& input, const Tensor& indices, const Tensor& gradInput) { globalContext().alertNotDeterministic( "adaptive_max_pool2d_backward_cuda"); TensorArg grad_input_arg{gradInput, "gradInput", 1}; TensorArg grad_output_arg{gradOutput, "gradOutput", 2}; TensorArg input_arg{input, "input", 3}; TensorArg indices_arg{indices, "indices", 4}; checkAllSameGPU( __func__, {grad_input_arg, grad_output_arg, input_arg, indices_arg}); if (gradOutput.numel() == 0) { return; } bool atomic = true; // suboptimal, but without atomic it doesn't pass the tests Tensor gradOutput_ = gradOutput.contiguous(); if (input.ndimension() == 3) { int64_t sizeD = input.size(0); int64_t isizeH = input.size(1); int64_t isizeW = input.size(2); int64_t osizeH = gradOutput_.size(1); int64_t osizeW = gradOutput_.size(2); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); gradInput.zero_(); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 
1 : blocksH; dim3 blocks(sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } else { int64_t sizeB = input.size(0); int64_t sizeD = input.size(1); int64_t isizeH = input.size(2); int64_t isizeW = input.size(3); int64_t osizeH = gradOutput_.size(2); int64_t osizeW = gradOutput_.size(3); gradInput.zero_(); // bool atomic = (isizeH%osizeH != 0) || (isizeW%osizeW != 0); AT_DISPATCH_FLOATING_TYPES_AND2( kHalf, kBFloat16, input.scalar_type(), "adaptive_max_pool2d_backward_cuda", [&] { scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>(); scalar_t* gradOutput_data = gradOutput_.data_ptr<scalar_t>(); int64_t* indices_data = indices.data_ptr<int64_t>(); // cuda blocks & threads: int blocksH = (int)(16L / sizeD); blocksH = blocksH < 1 ? 1 : blocksH; dim3 blocks(sizeB * sizeD, blocksH); dim3 threads(32, 8); if (atomic) { // run updateGradInput kernel, accumulate gradients atomically atomicadaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { // run updateGradInput kernel, accumulate gradients atomically adaptivemaxgradinput<<< blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_data, gradOutput_data, indices_data, isizeH, isizeW, osizeH, osizeW); C10_CUDA_KERNEL_LAUNCH_CHECK(); } }); } } } // at::native } // at
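// Hedged sketch, not from the ATen sources above: the adaptive pooling kernels
// derive each output cell's input window from start_index()/end_index() -- for
// output index a out of b cells over an input extent c, the window is
// [floor(a*c/b), ceil((a+1)*c/b)).  A small host-side check of that window math
// (start_index_host/end_index_host are illustrative names):
#include <cmath>
#include <cstdio>

static int start_index_host(int a, int b, int c) { return (int)std::floor((float)(a * c) / b); }
static int end_index_host(int a, int b, int c)   { return (int)std::ceil((float)((a + 1) * c) / b); }

int main() {
  int osize = 3, isize = 7;                     // pool 7 input columns down to 3 outputs
  for (int a = 0; a < osize; ++a)
    std::printf("out %d <- input [%d, %d)\n",
                a, start_index_host(a, osize, isize), end_index_host(a, osize, isize));
  // prints: [0,3), [2,5), [4,7) -- adjacent windows may overlap by design
}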
52caebcfc1ab93118b75f715d5798a7a079f9876.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "headers.h" __managed__ int Dev_A; __managed__ int Dev_B; __managed__ int Dev_size; #define TILE_SIZE 4 #define WINDOW_SIZE 3 __global__ void histogram_gray_sacale_CUDA(unsigned char* Image, int* Histogram) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; atomicAdd(&Histogram[Image[Image_Idx]], 1); } void histogram_calculation_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram) { unsigned char* Dev_Image = NULL; int* Dev_Histogram = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Histogram, 256 * sizeof(int)); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram, Histogram, 256 * sizeof(int), hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); histogram_gray_sacale_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Histogram); //copy memory back to CPU from GPU hipMemcpy(Histogram, Dev_Histogram, 256 * sizeof(int), hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Histogram); hipFree(Dev_Image); } __global__ void histogram_RGB_CUDA(unsigned char* Image, int Channels, int* Histogram_Blue, int* Histogram_Green, int* Histogram_Red) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; atomicAdd(&Histogram_Blue[Image[Image_Idx]], 1); atomicAdd(&Histogram_Green[Image[Image_Idx + 1]], 1); atomicAdd(&Histogram_Red[Image[Image_Idx + 2]], 1); } void histogram_calculation_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram_Blue, int* Histogram_Green, int* Histogram_Red) { unsigned char* Dev_Image = NULL; int* Dev_Histogram_Blue = NULL; int* Dev_Histogram_Green = NULL; int* Dev_Histogram_Red = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Histogram_Blue, 256 * sizeof(int)); hipMalloc((void**)&Dev_Histogram_Green, 256 * sizeof(int)); hipMalloc((void**)&Dev_Histogram_Red, 256 * sizeof(int)); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Blue, Histogram_Blue, 256 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Green, Histogram_Green, 256 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Red, Histogram_Red, 256 * sizeof(int), hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); histogram_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Channels, Dev_Histogram_Blue, Dev_Histogram_Green, Dev_Histogram_Red); //copy memory back to CPU from GPU hipMemcpy(Histogram_Blue, Dev_Histogram_Blue, 256 * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Histogram_Green, Dev_Histogram_Green, 256 * sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(Histogram_Red, Dev_Histogram_Red, 256 * sizeof(int), hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Histogram_Blue); hipFree(Dev_Histogram_Green); hipFree(Dev_Histogram_Red); hipFree(Dev_Image); } __global__ void equalization_CUDA(unsigned char* Image, unsigned char* Image_eq, int * hist_func) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; Image_eq[Image_Idx] = (unsigned char)hist_func[Image[Image_Idx]]; } void image_equalization_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int * 
hist_function) { unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; int* Dev_Histogram = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_eq, Height * Width * Channels); hipMalloc((void**)&Dev_Histogram, 256 * sizeof(int)); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_eq, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram, hist_function, 256 * sizeof(int), hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); equalization_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Dev_Histogram); //copy memory back to CPU from GPU hipMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image_eq); hipFree(Dev_Image); } __global__ void equalization_RGB_CUDA(unsigned char* Image, unsigned char* Image_eq, int Channels, int* f_Histogram_Blue, int* f_Histogram_Green, int* f_Histogram_Red) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; Image_eq[Image_Idx] = (unsigned char)f_Histogram_Blue[Image[Image_Idx]]; Image_eq[Image_Idx+1] = (unsigned char)f_Histogram_Green[Image[Image_Idx+1]]; Image_eq[Image_Idx+2] = (unsigned char)f_Histogram_Red[Image[Image_Idx+2]]; } void image_equalization_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int* f_Histogram_Blue, int* f_Histogram_Green, int* f_Histogram_Red) { unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; int* Dev_Histogram_Blue = NULL; int* Dev_Histogram_Green = NULL; int* Dev_Histogram_Red = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_eq, Height * Width * Channels); hipMalloc((void**)&Dev_Histogram_Blue, 256 * sizeof(int)); hipMalloc((void**)&Dev_Histogram_Green, 256 * sizeof(int)); hipMalloc((void**)&Dev_Histogram_Red, 256 * sizeof(int)); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_eq, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Blue, f_Histogram_Blue, 256 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Green, f_Histogram_Green, 256 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(Dev_Histogram_Red, f_Histogram_Red, 256 * sizeof(int), hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); equalization_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Channels, Dev_Histogram_Blue, Dev_Histogram_Green, Dev_Histogram_Red); //copy memory back to CPU from GPU hipMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image_eq); hipFree(Dev_Image); } __global__ void global_function_CUDA(unsigned char* Image, unsigned char* Image_eq) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; int new_value = Dev_A * Image[Image_Idx] + Dev_B; if (new_value > 255) new_value = 255; Image_eq[Image_Idx] = new_value; } void global_function_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int A, int B) { Dev_A = A; Dev_B = B; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_eq, Height 
* Width * Channels); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_eq, Image, Height * Width * Channels, hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); global_function_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq); //copy memory back to CPU from GPU hipMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image_eq); hipFree(Dev_Image); } __global__ void global_function_RGB_CUDA(unsigned char* Image, unsigned char* Image_eq, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x)* Channels; int new_valueB = Dev_A * Image[Image_Idx] + Dev_B; int new_valueG = Dev_A * Image[Image_Idx+1] + Dev_B; int new_valueR = Dev_A * Image[Image_Idx+2] + Dev_B; if (new_valueB > 255) new_valueB = 255; if (new_valueG > 255) new_valueG = 255; if (new_valueR > 255) new_valueR = 255; Image_eq[Image_Idx] = new_valueB; Image_eq[Image_Idx+1] = new_valueG; Image_eq[Image_Idx+2] = new_valueR; } void global_function_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int A, int B) { Dev_A = A; Dev_B = B; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_eq, Height * Width * Channels); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_eq, Image, Height * Width * Channels, hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); global_function_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Channels); //copy memory back to CPU from GPU hipMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image_eq); hipFree(Dev_Image); } __global__ void addition_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] + Image2[Image_Idx])/2; int new_valueG = (Image1[Image_Idx+1] + Image2[Image_Idx+1])/2; int new_valueR = (Image1[Image_Idx+2] + Image2[Image_Idx+2])/2; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx+1] = new_valueG; Image_res[Image_Idx+2] = new_valueR; } __global__ void substraction_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = ((Image1[Image_Idx] - Image2[Image_Idx]) / 2 ) + 128; int new_valueG = ((Image1[Image_Idx + 1] - Image2[Image_Idx + 1]) / 2) + 128; int new_valueR = ((Image1[Image_Idx + 2] - Image2[Image_Idx + 2]) / 2) + 128; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void multiplication_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] * Image2[Image_Idx]) / 255; int new_valueG = (Image1[Image_Idx + 1] * Image2[Image_Idx + 1]) / 255; int new_valueR = (Image1[Image_Idx + 2] * Image2[Image_Idx + 2]) / 255; Image_res[Image_Idx] = new_valueB; 
Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void division_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] / Image2[Image_Idx]) * 255; int new_valueG = (Image1[Image_Idx + 1] / Image2[Image_Idx + 1]) * 255; int new_valueR = (Image1[Image_Idx + 2] / Image2[Image_Idx + 2]) * 255; if (new_valueB > 255) new_valueB = 255; if (new_valueG > 255) new_valueG = 255; if (new_valueR > 255) new_valueR = 255; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } void arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, int Height, int Width, int Channels, unsigned char* image_res, int type_operation) { unsigned char* Dev_Image1 = NULL; unsigned char* Dev_Image2 = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image1, Height * Width * Channels); hipMalloc((void**)&Dev_Image2, Height * Width * Channels); hipMalloc((void**)&Dev_Image_res, Height * Width * Channels); //copy CPU data to GPU hipMemcpy(Dev_Image1, Image1, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image2, Image2, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_res, image_res, Height * Width * Channels, hipMemcpyHostToDevice); dim3 Grid_Image(Width, Height); switch (type_operation) { case 1: addition_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 2: substraction_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 3: multiplication_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 4: division_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; default: break; } //copy memory back to CPU from GPU hipMemcpy(image_res, Dev_Image_res, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image1); hipFree(Dev_Image2); hipFree(Dev_Image_res); } __global__ void kernel_convolution_CUDA(unsigned char* Image, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x)* Channels; int new_valueB = Dev_A * Image[Image_Idx] + Dev_B; int new_valueG = Dev_A * Image[Image_Idx + 1] + Dev_B; int new_valueR = Dev_A * Image[Image_Idx + 2] + Dev_B; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void kernel_median_CUDA(unsigned char *Image, unsigned char *Image_res, int imageWidth, int imageHeight) { // Set row and colum for thread. 
int Idy = blockIdx.y * blockDim.y + threadIdx.y; int Idx = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if ((Idy == 0) || (Idx == 0) || (Idy == imageHeight - 1) || (Idx == imageWidth - 1)) Image_res[Idy*imageWidth + Idx] = 0; //Deal with boundry conditions else { for (int x = 0; x < WINDOW_SIZE; x++) { for (int y = 0; y < WINDOW_SIZE; y++) { sum+= Image[(Idy + x - 1)*imageWidth + (Idx + y - 1)]; } } Image_res[Idy*imageWidth + Idx] = sum/9; } } __global__ void kernel_sobel_CUDA(unsigned char * Image, unsigned char *Image_res, const unsigned int width, const unsigned int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float dx, dy; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { dx = (-1 * Image[(y - 1)*width + (x - 1)]) + (-2 * Image[y*width + (x - 1)]) + (-1 * Image[(y + 1)*width + (x - 1)]) + (Image[(y - 1)*width + (x + 1)]) + (2 * Image[y*width + (x + 1)]) + (Image[(y + 1)*width + (x + 1)]); dy = (Image[(y - 1)*width + (x - 1)]) + (2 * Image[(y - 1)*width + x]) + (Image[(y - 1)*width + (x + 1)]) + (-1 * Image[(y + 1)*width + (x - 1)]) + (-2 * Image[(y + 1)*width + x]) + (-1 * Image[(y + 1)*width + (x + 1)]); Image_res[y*width + x] = sqrt((dx*dx) + (dy*dy)); } } void convolution_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_res, int type_convolution) { unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_res, Height * Width * Channels); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_res, Image, Height * Width * Channels, hipMemcpyHostToDevice); dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 Grid_Image((int)ceil(Width/ TILE_SIZE), (int)ceil(Height/ TILE_SIZE)); switch (type_convolution) { case 1: kernel_median_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res, Width, Height); break; case 2: kernel_sobel_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res, Width, Height); break; default: break; } //copy memory back to CPU from GPU hipMemcpy(image_res, Dev_Image_res, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU hipFree(Dev_Image_res); hipFree(Dev_Image); } __global__ void kernel_bilinear_interpolation_CUDA(unsigned char* Image, unsigned char* Image_eq) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; int Image_Idx_ori = x / Dev_size + y * gridDim.x / Dev_size; Image_eq[Image_Idx] = Image[Image_Idx_ori]; } void bilinear_interpolation_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_res, int size) { Dev_size = size; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory hipMalloc((void**)&Dev_Image, Height * Width * Channels); hipMalloc((void**)&Dev_Image_res, Height * Width * size * Channels); //copy CPU data to GPU hipMemcpy(Dev_Image, Image, Height * Width * Channels, hipMemcpyHostToDevice); hipMemcpy(Dev_Image_res, image_res, Height * Width *size * Channels, hipMemcpyHostToDevice); dim3 Grid_Image(Width*size, Height*size); dim3 dimBlock(TILE_SIZE, TILE_SIZE); kernel_bilinear_interpolation_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res); //copy memory back to CPU from GPU hipMemcpy(image_res, Dev_Image_res, Height * Width * Channels, hipMemcpyDeviceToHost); //free up the memory of GPU 
hipFree(Dev_Image_res); hipFree(Dev_Image); }
52caebcfc1ab93118b75f715d5798a7a079f9876.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "headers.h" __managed__ int Dev_A; __managed__ int Dev_B; __managed__ int Dev_size; #define TILE_SIZE 4 #define WINDOW_SIZE 3 __global__ void histogram_gray_sacale_CUDA(unsigned char* Image, int* Histogram) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; atomicAdd(&Histogram[Image[Image_Idx]], 1); } void histogram_calculation_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram) { unsigned char* Dev_Image = NULL; int* Dev_Histogram = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Histogram, 256 * sizeof(int)); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram, Histogram, 256 * sizeof(int), cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); histogram_gray_sacale_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Histogram); //copy memory back to CPU from GPU cudaMemcpy(Histogram, Dev_Histogram, 256 * sizeof(int), cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Histogram); cudaFree(Dev_Image); } __global__ void histogram_RGB_CUDA(unsigned char* Image, int Channels, int* Histogram_Blue, int* Histogram_Green, int* Histogram_Red) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; atomicAdd(&Histogram_Blue[Image[Image_Idx]], 1); atomicAdd(&Histogram_Green[Image[Image_Idx + 1]], 1); atomicAdd(&Histogram_Red[Image[Image_Idx + 2]], 1); } void histogram_calculation_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, int* Histogram_Blue, int* Histogram_Green, int* Histogram_Red) { unsigned char* Dev_Image = NULL; int* Dev_Histogram_Blue = NULL; int* Dev_Histogram_Green = NULL; int* Dev_Histogram_Red = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Histogram_Blue, 256 * sizeof(int)); cudaMalloc((void**)&Dev_Histogram_Green, 256 * sizeof(int)); cudaMalloc((void**)&Dev_Histogram_Red, 256 * sizeof(int)); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Blue, Histogram_Blue, 256 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Green, Histogram_Green, 256 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Red, Histogram_Red, 256 * sizeof(int), cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); histogram_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Channels, Dev_Histogram_Blue, Dev_Histogram_Green, Dev_Histogram_Red); //copy memory back to CPU from GPU cudaMemcpy(Histogram_Blue, Dev_Histogram_Blue, 256 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Histogram_Green, Dev_Histogram_Green, 256 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(Histogram_Red, Dev_Histogram_Red, 256 * sizeof(int), cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Histogram_Blue); cudaFree(Dev_Histogram_Green); cudaFree(Dev_Histogram_Red); cudaFree(Dev_Image); } __global__ void equalization_CUDA(unsigned char* Image, unsigned char* Image_eq, int * hist_func) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; Image_eq[Image_Idx] = (unsigned char)hist_func[Image[Image_Idx]]; } void image_equalization_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int * hist_function) { unsigned char* 
Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; int* Dev_Histogram = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_eq, Height * Width * Channels); cudaMalloc((void**)&Dev_Histogram, 256 * sizeof(int)); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_eq, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram, hist_function, 256 * sizeof(int), cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); equalization_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Dev_Histogram); //copy memory back to CPU from GPU cudaMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image_eq); cudaFree(Dev_Image); } __global__ void equalization_RGB_CUDA(unsigned char* Image, unsigned char* Image_eq, int Channels, int* f_Histogram_Blue, int* f_Histogram_Green, int* f_Histogram_Red) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; Image_eq[Image_Idx] = (unsigned char)f_Histogram_Blue[Image[Image_Idx]]; Image_eq[Image_Idx+1] = (unsigned char)f_Histogram_Green[Image[Image_Idx+1]]; Image_eq[Image_Idx+2] = (unsigned char)f_Histogram_Red[Image[Image_Idx+2]]; } void image_equalization_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int* f_Histogram_Blue, int* f_Histogram_Green, int* f_Histogram_Red) { unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; int* Dev_Histogram_Blue = NULL; int* Dev_Histogram_Green = NULL; int* Dev_Histogram_Red = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_eq, Height * Width * Channels); cudaMalloc((void**)&Dev_Histogram_Blue, 256 * sizeof(int)); cudaMalloc((void**)&Dev_Histogram_Green, 256 * sizeof(int)); cudaMalloc((void**)&Dev_Histogram_Red, 256 * sizeof(int)); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_eq, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Blue, f_Histogram_Blue, 256 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Green, f_Histogram_Green, 256 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(Dev_Histogram_Red, f_Histogram_Red, 256 * sizeof(int), cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); equalization_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Channels, Dev_Histogram_Blue, Dev_Histogram_Green, Dev_Histogram_Red); //copy memory back to CPU from GPU cudaMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image_eq); cudaFree(Dev_Image); } __global__ void global_function_CUDA(unsigned char* Image, unsigned char* Image_eq) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; int new_value = Dev_A * Image[Image_Idx] + Dev_B; if (new_value > 255) new_value = 255; Image_eq[Image_Idx] = new_value; } void global_function_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int A, int B) { Dev_A = A; Dev_B = B; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_eq, 
Height * Width * Channels); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_eq, Image, Height * Width * Channels, cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); global_function_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq); //copy memory back to CPU from GPU cudaMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image_eq); cudaFree(Dev_Image); } __global__ void global_function_RGB_CUDA(unsigned char* Image, unsigned char* Image_eq, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x)* Channels; int new_valueB = Dev_A * Image[Image_Idx] + Dev_B; int new_valueG = Dev_A * Image[Image_Idx+1] + Dev_B; int new_valueR = Dev_A * Image[Image_Idx+2] + Dev_B; if (new_valueB > 255) new_valueB = 255; if (new_valueG > 255) new_valueG = 255; if (new_valueR > 255) new_valueR = 255; Image_eq[Image_Idx] = new_valueB; Image_eq[Image_Idx+1] = new_valueG; Image_eq[Image_Idx+2] = new_valueR; } void global_function_RGB_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_equalized, int A, int B) { Dev_A = A; Dev_B = B; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_eq = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_eq, Height * Width * Channels); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_eq, Image, Height * Width * Channels, cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); global_function_RGB_CUDA << <Grid_Image, 1 >> > (Dev_Image, Dev_Image_eq, Channels); //copy memory back to CPU from GPU cudaMemcpy(image_equalized, Dev_Image_eq, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image_eq); cudaFree(Dev_Image); } __global__ void addition_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] + Image2[Image_Idx])/2; int new_valueG = (Image1[Image_Idx+1] + Image2[Image_Idx+1])/2; int new_valueR = (Image1[Image_Idx+2] + Image2[Image_Idx+2])/2; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx+1] = new_valueG; Image_res[Image_Idx+2] = new_valueR; } __global__ void substraction_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = ((Image1[Image_Idx] - Image2[Image_Idx]) / 2 ) + 128; int new_valueG = ((Image1[Image_Idx + 1] - Image2[Image_Idx + 1]) / 2) + 128; int new_valueR = ((Image1[Image_Idx + 2] - Image2[Image_Idx + 2]) / 2) + 128; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void multiplication_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] * Image2[Image_Idx]) / 255; int new_valueG = (Image1[Image_Idx + 1] * Image2[Image_Idx + 1]) / 255; int new_valueR = (Image1[Image_Idx + 2] * Image2[Image_Idx + 2]) / 255; Image_res[Image_Idx] = 
new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void division_arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x) * Channels; int new_valueB = (Image1[Image_Idx] / Image2[Image_Idx]) * 255; int new_valueG = (Image1[Image_Idx + 1] / Image2[Image_Idx + 1]) * 255; int new_valueR = (Image1[Image_Idx + 2] / Image2[Image_Idx + 2]) * 255; if (new_valueB > 255) new_valueB = 255; if (new_valueG > 255) new_valueG = 255; if (new_valueR > 255) new_valueR = 255; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } void arithmetic_operations_CUDA(unsigned char* Image1, unsigned char* Image2, int Height, int Width, int Channels, unsigned char* image_res, int type_operation) { unsigned char* Dev_Image1 = NULL; unsigned char* Dev_Image2 = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image1, Height * Width * Channels); cudaMalloc((void**)&Dev_Image2, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_res, Height * Width * Channels); //copy CPU data to GPU cudaMemcpy(Dev_Image1, Image1, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image2, Image2, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_res, image_res, Height * Width * Channels, cudaMemcpyHostToDevice); dim3 Grid_Image(Width, Height); switch (type_operation) { case 1: addition_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 2: substraction_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 3: multiplication_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; case 4: division_arithmetic_operations_CUDA << <Grid_Image, 1 >> > (Dev_Image1, Dev_Image2, Dev_Image_res, Channels); break; default: break; } //copy memory back to CPU from GPU cudaMemcpy(image_res, Dev_Image_res, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image1); cudaFree(Dev_Image2); cudaFree(Dev_Image_res); } __global__ void kernel_convolution_CUDA(unsigned char* Image, unsigned char* Image_res, int Channels) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = (x + y * gridDim.x)* Channels; int new_valueB = Dev_A * Image[Image_Idx] + Dev_B; int new_valueG = Dev_A * Image[Image_Idx + 1] + Dev_B; int new_valueR = Dev_A * Image[Image_Idx + 2] + Dev_B; Image_res[Image_Idx] = new_valueB; Image_res[Image_Idx + 1] = new_valueG; Image_res[Image_Idx + 2] = new_valueR; } __global__ void kernel_median_CUDA(unsigned char *Image, unsigned char *Image_res, int imageWidth, int imageHeight) { // Set row and colum for thread. 
int Idy = blockIdx.y * blockDim.y + threadIdx.y; int Idx = blockIdx.x * blockDim.x + threadIdx.x; int sum = 0; if ((Idy == 0) || (Idx == 0) || (Idy == imageHeight - 1) || (Idx == imageWidth - 1)) Image_res[Idy*imageWidth + Idx] = 0; //Deal with boundry conditions else { for (int x = 0; x < WINDOW_SIZE; x++) { for (int y = 0; y < WINDOW_SIZE; y++) { sum+= Image[(Idy + x - 1)*imageWidth + (Idx + y - 1)]; } } Image_res[Idy*imageWidth + Idx] = sum/9; } } __global__ void kernel_sobel_CUDA(unsigned char * Image, unsigned char *Image_res, const unsigned int width, const unsigned int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; float dx, dy; if (x > 0 && y > 0 && x < width - 1 && y < height - 1) { dx = (-1 * Image[(y - 1)*width + (x - 1)]) + (-2 * Image[y*width + (x - 1)]) + (-1 * Image[(y + 1)*width + (x - 1)]) + (Image[(y - 1)*width + (x + 1)]) + (2 * Image[y*width + (x + 1)]) + (Image[(y + 1)*width + (x + 1)]); dy = (Image[(y - 1)*width + (x - 1)]) + (2 * Image[(y - 1)*width + x]) + (Image[(y - 1)*width + (x + 1)]) + (-1 * Image[(y + 1)*width + (x - 1)]) + (-2 * Image[(y + 1)*width + x]) + (-1 * Image[(y + 1)*width + (x + 1)]); Image_res[y*width + x] = sqrt((dx*dx) + (dy*dy)); } } void convolution_gray_scale_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_res, int type_convolution) { unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_res, Height * Width * Channels); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_res, Image, Height * Width * Channels, cudaMemcpyHostToDevice); dim3 dimBlock(TILE_SIZE, TILE_SIZE); dim3 Grid_Image((int)ceil(Width/ TILE_SIZE), (int)ceil(Height/ TILE_SIZE)); switch (type_convolution) { case 1: kernel_median_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res, Width, Height); break; case 2: kernel_sobel_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res, Width, Height); break; default: break; } //copy memory back to CPU from GPU cudaMemcpy(image_res, Dev_Image_res, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the memory of GPU cudaFree(Dev_Image_res); cudaFree(Dev_Image); } __global__ void kernel_bilinear_interpolation_CUDA(unsigned char* Image, unsigned char* Image_eq) { int x = blockIdx.x; int y = blockIdx.y; int Image_Idx = x + y * gridDim.x; int Image_Idx_ori = x / Dev_size + y * gridDim.x / Dev_size; Image_eq[Image_Idx] = Image[Image_Idx_ori]; } void bilinear_interpolation_CUDA(unsigned char* Image, int Height, int Width, int Channels, unsigned char* image_res, int size) { Dev_size = size; unsigned char* Dev_Image = NULL; unsigned char* Dev_Image_res = NULL; //allocate cuda variable memory cudaMalloc((void**)&Dev_Image, Height * Width * Channels); cudaMalloc((void**)&Dev_Image_res, Height * Width * size * Channels); //copy CPU data to GPU cudaMemcpy(Dev_Image, Image, Height * Width * Channels, cudaMemcpyHostToDevice); cudaMemcpy(Dev_Image_res, image_res, Height * Width *size * Channels, cudaMemcpyHostToDevice); dim3 Grid_Image(Width*size, Height*size); dim3 dimBlock(TILE_SIZE, TILE_SIZE); kernel_bilinear_interpolation_CUDA << <Grid_Image, dimBlock >> > (Dev_Image, Dev_Image_res); //copy memory back to CPU from GPU cudaMemcpy(image_res, Dev_Image_res, Height * Width * Channels, cudaMemcpyDeviceToHost); //free up the 
memory of GPU cudaFree(Dev_Image_res); cudaFree(Dev_Image); }
0131f88d581c41914f29b124eb66c3a98a366c13.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define trans_BLOCK_SIZE 32 #define TILE_DIM 32 #define BLOCK_ROWS trans_BLOCK_SIZE #define trans_GRID_X 160 #define trans_GRID_Y 480 #define trans_NBLOCKS (trans_GRID_X*trans_GRID_Y) void computeGold( float* reference, float* idata, const unsigned int size_x, const unsigned int size_y ) { // transpose matrix for( unsigned int y = 0; y < size_y; ++y) { for( unsigned int x = 0; x < size_x; ++x) { reference[(x * size_y) + y] = idata[(y * size_x) + x]; } } } void trans_checkResults(float *h_idata, float *h_odata, int width, int height) { /* // compute reference solution int trans_mem_size = width * height *sizeof(float); float* reference = (float*) malloc( trans_mem_size); computeGold( reference, h_idata, width, height); for(int i=0; i<width*height; ++i) { if(reference[i] != d_odata[i]) { fprintf(stderr, "Failed! i = %d\n", i); return; } } printf("Test passed!\n");*/ for(unsigned int xIndex =0;xIndex<width;xIndex++){ for (unsigned int yIndex =0;yIndex<height;yIndex++){ if (xIndex < width && yIndex < height) { unsigned int index_in = xIndex + width * yIndex; unsigned int index_out = yIndex + height * xIndex; if( h_odata[index_out] != h_idata[index_in]) {printf("failed!%d %d %f %f \n ",xIndex,yIndex,h_odata[index_out],h_idata[index_in]);return;} } } } printf("GOOD!trans passed\n"); return; } __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } __global__ void trans_kernel(float *odata, float* idata, int width, int height) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y; if (xIndex < width && yIndex < height) { unsigned int index_in = xIndex + width * yIndex; unsigned int index_out = yIndex + height * xIndex; odata[index_out] = idata[index_in]; } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); const unsigned int trans_size_x = trans_GRID_X * trans_BLOCK_SIZE; const unsigned int trans_size_y = trans_GRID_Y * trans_BLOCK_SIZE; // size of memory required to store the matrix const unsigned int trans_mem_size = sizeof(float) * trans_size_x * trans_size_y; // allocate host memory float* h_trans_idata = (float*) malloc(trans_mem_size); // initalize the memory for( unsigned int i = 0; i < (trans_size_x * trans_size_y); ++i) { h_trans_idata[i] = (float) i; // rand(); } // allocate device memory float* d_trans_idata; float* d_trans_odata; hipMalloc( (void**) &d_trans_idata, trans_mem_size); hipMalloc( (void**) &d_trans_odata, trans_mem_size); // copy host memory to device hipMemcpy( d_trans_idata, h_trans_idata, 
trans_mem_size, hipMemcpyHostToDevice); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipDeviceSetCacheConfig(hipFuncCachePreferL1); hipEventRecord(kernel_start, 0); // setup execution parameters dim3 trans_grid(trans_GRID_X, trans_GRID_Y, 1); dim3 trans_block(trans_BLOCK_SIZE, trans_BLOCK_SIZE, 1); hipLaunchKernelGGL(( transposeNoBankConflicts), dim3(trans_grid), dim3(trans_block), 0, 0, d_trans_odata, d_trans_idata, trans_size_x, trans_size_y); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; float* h_trans_odata = (float*) malloc(trans_mem_size); hipMemcpy( h_trans_odata, d_trans_odata, trans_mem_size, hipMemcpyDeviceToHost); // check result trans_checkResults(h_trans_idata, h_trans_odata, trans_size_x, trans_size_y); return 0; }
0131f88d581c41914f29b124eb66c3a98a366c13.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define trans_BLOCK_SIZE 32 #define TILE_DIM 32 #define BLOCK_ROWS trans_BLOCK_SIZE #define trans_GRID_X 160 #define trans_GRID_Y 480 #define trans_NBLOCKS (trans_GRID_X*trans_GRID_Y) void computeGold( float* reference, float* idata, const unsigned int size_x, const unsigned int size_y ) { // transpose matrix for( unsigned int y = 0; y < size_y; ++y) { for( unsigned int x = 0; x < size_x; ++x) { reference[(x * size_y) + y] = idata[(y * size_x) + x]; } } } void trans_checkResults(float *h_idata, float *h_odata, int width, int height) { /* // compute reference solution int trans_mem_size = width * height *sizeof(float); float* reference = (float*) malloc( trans_mem_size); computeGold( reference, h_idata, width, height); for(int i=0; i<width*height; ++i) { if(reference[i] != d_odata[i]) { fprintf(stderr, "Failed! i = %d\n", i); return; } } printf("Test passed!\n");*/ for(unsigned int xIndex =0;xIndex<width;xIndex++){ for (unsigned int yIndex =0;yIndex<height;yIndex++){ if (xIndex < width && yIndex < height) { unsigned int index_in = xIndex + width * yIndex; unsigned int index_out = yIndex + height * xIndex; if( h_odata[index_out] != h_idata[index_in]) {printf("failed!%d %d %f %f \n ",xIndex,yIndex,h_odata[index_out],h_idata[index_in]);return;} } } } printf("GOOD!trans passed\n"); return; } __global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width]; } __syncthreads(); for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) { odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i]; } } __global__ void trans_kernel(float *odata, float* idata, int width, int height) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y; if (xIndex < width && yIndex < height) { unsigned int index_in = xIndex + width * yIndex; unsigned int index_out = yIndex + height * xIndex; odata[index_out] = idata[index_in]; } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); const unsigned int trans_size_x = trans_GRID_X * trans_BLOCK_SIZE; const unsigned int trans_size_y = trans_GRID_Y * trans_BLOCK_SIZE; // size of memory required to store the matrix const unsigned int trans_mem_size = sizeof(float) * trans_size_x * trans_size_y; // allocate host memory float* h_trans_idata = (float*) malloc(trans_mem_size); // initalize the memory for( unsigned int i = 0; i < (trans_size_x * trans_size_y); ++i) { h_trans_idata[i] = (float) i; // rand(); } // allocate device memory float* d_trans_idata; float* d_trans_odata; cudaMalloc( (void**) &d_trans_idata, trans_mem_size); cudaMalloc( (void**) &d_trans_odata, trans_mem_size); // copy host memory to device cudaMemcpy( d_trans_idata, h_trans_idata, trans_mem_size, cudaMemcpyHostToDevice); cudaEvent_t kernel_start, 
kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaDeviceSetCacheConfig(cudaFuncCachePreferL1); cudaEventRecord(kernel_start, 0); // setup execution parameters dim3 trans_grid(trans_GRID_X, trans_GRID_Y, 1); dim3 trans_block(trans_BLOCK_SIZE, trans_BLOCK_SIZE, 1); transposeNoBankConflicts<<<trans_grid, trans_block>>>(d_trans_odata, d_trans_idata, trans_size_x, trans_size_y); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; float* h_trans_odata = (float*) malloc(trans_mem_size); cudaMemcpy( h_trans_odata, d_trans_odata, trans_mem_size, cudaMemcpyDeviceToHost); // check result trans_checkResults(h_trans_idata, h_trans_odata, trans_size_x, trans_size_y); return 0; }
14d801e8dbb21e7c1658a551de7d57225bb005d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/embedding_grad_kernel.h" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/embedding_util.h" DECLARE_bool(cudnn_deterministic); namespace phi { template <typename InT, typename OutT> __global__ void InputTypeConvert(const InT* in_ids, const int64_t K, OutT* out_ids) { for (int i = 0; i < K; i++) { out_ids[i] = static_cast<OutT>(in_ids[i]); } } template <typename T, typename IdT> __global__ void EmbeddingGrad(T* table, const T* output, const IdT* ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * gridDim.x; while (idy < K) { auto id = static_cast<int64_t>(ids[idy]); const T* out = output + idy * D; T* tab = table + id * D; #ifdef PADDLE_WITH_CUDA paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab); #else for (int i = idx; i < D; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } #endif idy += blockDim.y * gridDim.x; } } template <typename T, typename Context> struct EmbeddingGradCUDAFunctor { EmbeddingGradCUDAFunctor(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, DenseTensor* weight_grad) : dev_ctx_(dev_ctx), input_(input), weight_(weight), out_grad_(out_grad), padding_idx_(padding_idx), weight_grad_(weight_grad) {} template <typename IdT> void apply() { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
{ auto d_output_t = out_grad_; auto d_table_t = weight_grad_; int N = weight_grad_->dims()[0]; int D = weight_grad_->dims()[1]; int K = input_.numel(); const T* d_output = d_output_t.template data<T>(); const auto* ids = input_.template data<IdT>(); T* d_table = dev_ctx_.template Alloc<T>(d_table_t); #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx_.stream())); #else PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx_.stream())); #endif const int gridx = 2 * dev_ctx_.GetSMCount(); dim3 threads(128, 8); dim3 grids(gridx, 1); if (FLAGS_cudnn_deterministic) { VLOG(2) << "Run grad kernel of embedding with single thread."; grids.x = 1; } hipLaunchKernelGGL(( EmbeddingGrad<T, IdT>), dim3(grids), dim3(threads), 0, dev_ctx_.stream(), d_table, d_output, ids, N, K, D); } } private: const phi::GPUContext& dev_ctx_; const DenseTensor& input_; const DenseTensor& weight_; const DenseTensor& out_grad_; int64_t padding_idx_; DenseTensor* weight_grad_; }; template <typename T, typename Context> void EmbeddingGradKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, DenseTensor* weight_grad) { EmbeddingGradCUDAFunctor<T, Context> functor( ctx, input, weight, out_grad, padding_idx, weight_grad); if (input.dtype() == phi::DataType::INT32) { functor.template apply<int>(); } else if (input.dtype() == phi::DataType::INT64) { functor.template apply<int64_t>(); } else if (input.dtype() == phi::DataType::INT16) { functor.template apply<int16_t>(); } else { PADDLE_THROW(phi::errors::Unimplemented( "emebdding input only support int16, int32 and int64")); } } template <typename T, typename Context> struct EmbeddingSparseGradCUDAFunctor { EmbeddingSparseGradCUDAFunctor(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, SelectedRows* weight_grad) : dev_ctx_(dev_ctx), input_(input), weight_(weight), out_grad_(out_grad), padding_idx_(padding_idx), weight_grad_(weight_grad) {} template <typename IdT> void apply() { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
const auto* ids_data = input_.template data<IdT>(); auto* d_table = weight_grad_; auto* table = &weight_; auto* d_output = &out_grad_; int64_t ids_num = input_.numel(); dim3 threads(128, 8); dim3 grids(8, 1); auto stream = dev_ctx_.stream(); paddle::framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = dev_ctx_.GetPlace(); paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows); if (!std::is_same<IdT, int64_t>::value) { hipLaunchKernelGGL(( InputTypeConvert), dim3(grids), dim3(threads), 0, stream, ids_data, ids_num, mixv_new_rows.MutableData(gpu_place)); } else { paddle::memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(gpu_place), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); } mixv_new_rows.CopyToCPU(); d_table->set_rows(new_rows); auto* d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); dev_ctx_.template Alloc<T>(d_table_value); auto* d_table_data = d_table_value->template data<T>(); auto* d_output_data = d_output->template data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, phi::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. " "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); paddle::memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } private: const phi::GPUContext& dev_ctx_; const DenseTensor& input_; const DenseTensor& weight_; const DenseTensor& out_grad_; int64_t padding_idx_; SelectedRows* weight_grad_; }; template <typename T, typename Context> void EmbeddingSparseGradKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, SelectedRows* weight_grad) { EmbeddingSparseGradCUDAFunctor<T, Context> functor( ctx, input, weight, out_grad, padding_idx, weight_grad); if (input.dtype() == phi::DataType::INT32) { functor.template apply<int>(); } else if (input.dtype() == phi::DataType::INT64) { functor.template apply<int64_t>(); } else if (input.dtype() == phi::DataType::INT16) { functor.template apply<int16_t>(); PADDLE_THROW(phi::errors::Unimplemented( "emebdding input only support int16, int32 and int64")); } } } // namespace phi PD_REGISTER_KERNEL(embedding_grad, GPU, ALL_LAYOUT, phi::EmbeddingGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(embedding_sparse_grad, GPU, ALL_LAYOUT, phi::EmbeddingSparseGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {}
14d801e8dbb21e7c1658a551de7d57225bb005d3.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/embedding_grad_kernel.h" #include "paddle/fluid/framework/mixed_vector.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/embedding_util.h" DECLARE_bool(cudnn_deterministic); namespace phi { template <typename InT, typename OutT> __global__ void InputTypeConvert(const InT* in_ids, const int64_t K, OutT* out_ids) { for (int i = 0; i < K; i++) { out_ids[i] = static_cast<OutT>(in_ids[i]); } } template <typename T, typename IdT> __global__ void EmbeddingGrad(T* table, const T* output, const IdT* ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * gridDim.x; while (idy < K) { auto id = static_cast<int64_t>(ids[idy]); const T* out = output + idy * D; T* tab = table + id * D; #ifdef PADDLE_WITH_CUDA paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab); #else for (int i = idx; i < D; i += blockDim.x) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } #endif idy += blockDim.y * gridDim.x; } } template <typename T, typename Context> struct EmbeddingGradCUDAFunctor { EmbeddingGradCUDAFunctor(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, DenseTensor* weight_grad) : dev_ctx_(dev_ctx), input_(input), weight_(weight), out_grad_(out_grad), padding_idx_(padding_idx), weight_grad_(weight_grad) {} template <typename IdT> void apply() { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
{ auto d_output_t = out_grad_; auto d_table_t = weight_grad_; int N = weight_grad_->dims()[0]; int D = weight_grad_->dims()[1]; int K = input_.numel(); const T* d_output = d_output_t.template data<T>(); const auto* ids = input_.template data<IdT>(); T* d_table = dev_ctx_.template Alloc<T>(d_table_t); #ifdef PADDLE_WITH_HIP PADDLE_ENFORCE_GPU_SUCCESS( hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx_.stream())); #else PADDLE_ENFORCE_GPU_SUCCESS( cudaMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx_.stream())); #endif const int gridx = 2 * dev_ctx_.GetSMCount(); dim3 threads(128, 8); dim3 grids(gridx, 1); if (FLAGS_cudnn_deterministic) { VLOG(2) << "Run grad kernel of embedding with single thread."; grids.x = 1; } EmbeddingGrad<T, IdT><<<grids, threads, 0, dev_ctx_.stream()>>>( d_table, d_output, ids, N, K, D); } } private: const phi::GPUContext& dev_ctx_; const DenseTensor& input_; const DenseTensor& weight_; const DenseTensor& out_grad_; int64_t padding_idx_; DenseTensor* weight_grad_; }; template <typename T, typename Context> void EmbeddingGradKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, DenseTensor* weight_grad) { EmbeddingGradCUDAFunctor<T, Context> functor( ctx, input, weight, out_grad, padding_idx, weight_grad); if (input.dtype() == phi::DataType::INT32) { functor.template apply<int>(); } else if (input.dtype() == phi::DataType::INT64) { functor.template apply<int64_t>(); } else if (input.dtype() == phi::DataType::INT16) { functor.template apply<int16_t>(); } else { PADDLE_THROW(phi::errors::Unimplemented( "emebdding input only support int16, int32 and int64")); } } template <typename T, typename Context> struct EmbeddingSparseGradCUDAFunctor { EmbeddingSparseGradCUDAFunctor(const Context& dev_ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, SelectedRows* weight_grad) : dev_ctx_(dev_ctx), input_(input), weight_(weight), out_grad_(out_grad), padding_idx_(padding_idx), weight_grad_(weight_grad) {} template <typename IdT> void apply() { // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. 
const auto* ids_data = input_.template data<IdT>(); auto* d_table = weight_grad_; auto* table = &weight_; auto* d_output = &out_grad_; int64_t ids_num = input_.numel(); dim3 threads(128, 8); dim3 grids(8, 1); auto stream = dev_ctx_.stream(); paddle::framework::Vector<int64_t> new_rows; new_rows.resize(ids_num); auto gpu_place = dev_ctx_.GetPlace(); paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows); if (!std::is_same<IdT, int64_t>::value) { InputTypeConvert<<<grids, threads, 0, stream>>>( ids_data, ids_num, mixv_new_rows.MutableData(gpu_place)); } else { paddle::memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(gpu_place), gpu_place, ids_data, ids_num * sizeof(int64_t), stream); } mixv_new_rows.CopyToCPU(); d_table->set_rows(new_rows); auto* d_table_value = d_table->mutable_value(); d_table_value->Resize({ids_num, table->dims()[1]}); dev_ctx_.template Alloc<T>(d_table_value); auto* d_table_data = d_table_value->template data<T>(); auto* d_output_data = d_output->template data<T>(); auto d_output_dims = d_output->dims(); auto d_output_dims_2d = phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1); PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d, phi::errors::InvalidArgument( "ShapeError: The shape of lookup_table@Grad and " "output@Grad should be same. " "But received lookup_table@Grad's shape = [%s], " "output@Grad's shape = [%s].", d_table_value->dims(), d_output_dims_2d)); paddle::memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); } private: const phi::GPUContext& dev_ctx_; const DenseTensor& input_; const DenseTensor& weight_; const DenseTensor& out_grad_; int64_t padding_idx_; SelectedRows* weight_grad_; }; template <typename T, typename Context> void EmbeddingSparseGradKernel(const Context& ctx, const DenseTensor& input, const DenseTensor& weight, const DenseTensor& out_grad, int64_t padding_idx, SelectedRows* weight_grad) { EmbeddingSparseGradCUDAFunctor<T, Context> functor( ctx, input, weight, out_grad, padding_idx, weight_grad); if (input.dtype() == phi::DataType::INT32) { functor.template apply<int>(); } else if (input.dtype() == phi::DataType::INT64) { functor.template apply<int64_t>(); } else if (input.dtype() == phi::DataType::INT16) { functor.template apply<int16_t>(); PADDLE_THROW(phi::errors::Unimplemented( "emebdding input only support int16, int32 and int64")); } } } // namespace phi PD_REGISTER_KERNEL(embedding_grad, GPU, ALL_LAYOUT, phi::EmbeddingGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {} PD_REGISTER_KERNEL(embedding_sparse_grad, GPU, ALL_LAYOUT, phi::EmbeddingSparseGradKernel, float, double, phi::dtype::float16, phi::dtype::bfloat16) {}
0a3ba52805ed5f73bd7b44ae626e6bec0f7d64fe.hip
// !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/sort.h>
#include <thrust/system/hip/detail/detail/stable_radix_sort.h>

#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA

typedef unittest::type_list<
#if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1))
// XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason
    unsigned char,
#endif
    unsigned short,
    unsigned int,
    unsigned long,
    unsigned long long> UnsignedIntegerTypes;

template <typename T>
struct TestRadixSortUnaligned
{
  void operator()(const size_t n)
  {
    typedef thrust::device_vector<T> Vector;

    Vector unsorted_keys = unittest::random_integers<T>(n);
    Vector sorted_keys   = unsorted_keys;
    thrust::sort(sorted_keys.begin(), sorted_keys.end());

    for(int offset = 1; offset < 4; offset++)
    {
      Vector unaligned_unsorted_keys(n + offset, 0);
      Vector unaligned_sorted_keys(n + offset, 0);

      thrust::copy(unsorted_keys.begin(), unsorted_keys.end(), unaligned_unsorted_keys.begin() + offset);
      thrust::copy(  sorted_keys.begin(),   sorted_keys.end(),   unaligned_sorted_keys.begin() + offset);

      thrust::hip::tag cuda_tag;
      thrust::system::cuda::detail::detail::stable_radix_sort(cuda_tag,
                                                              unaligned_unsorted_keys.begin() + offset,
                                                              unaligned_unsorted_keys.end());

      ASSERT_EQUAL(unaligned_unsorted_keys, unaligned_sorted_keys);
    }
  }
};
VariableUnitTest<TestRadixSortUnaligned, UnsignedIntegerTypes> TestRadixSortUnalignedInstance;

#endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
0a3ba52805ed5f73bd7b44ae626e6bec0f7d64fe.cu
#include <unittest/unittest.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/device_malloc_allocator.h>
#include <thrust/sort.h>
#include <thrust/system/cuda/detail/detail/stable_radix_sort.h>

#if THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA

typedef unittest::type_list<
#if !(defined(__GNUC__) && (__GNUC__ <= 4) && (__GNUC_MINOR__ <= 1))
// XXX GCC 4.1 miscompiles the char sorts with -O2 for some reason
    unsigned char,
#endif
    unsigned short,
    unsigned int,
    unsigned long,
    unsigned long long> UnsignedIntegerTypes;

template <typename T>
struct TestRadixSortUnaligned
{
  void operator()(const size_t n)
  {
    typedef thrust::device_vector<T> Vector;

    Vector unsorted_keys = unittest::random_integers<T>(n);
    Vector sorted_keys   = unsorted_keys;
    thrust::sort(sorted_keys.begin(), sorted_keys.end());

    for(int offset = 1; offset < 4; offset++)
    {
      Vector unaligned_unsorted_keys(n + offset, 0);
      Vector unaligned_sorted_keys(n + offset, 0);

      thrust::copy(unsorted_keys.begin(), unsorted_keys.end(), unaligned_unsorted_keys.begin() + offset);
      thrust::copy(  sorted_keys.begin(),   sorted_keys.end(),   unaligned_sorted_keys.begin() + offset);

      thrust::cuda::tag cuda_tag;
      thrust::system::cuda::detail::detail::stable_radix_sort(cuda_tag,
                                                              unaligned_unsorted_keys.begin() + offset,
                                                              unaligned_unsorted_keys.end());

      ASSERT_EQUAL(unaligned_unsorted_keys, unaligned_sorted_keys);
    }
  }
};
VariableUnitTest<TestRadixSortUnaligned, UnsignedIntegerTypes> TestRadixSortUnalignedInstance;

#endif // THRUST_DEVICE_SYSTEM == THRUST_DEVICE_SYSTEM_CUDA
a1b50e18364e17862257a1c4e3d48473fe5a59d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Fractal.cuh"
#include "device_launch_parameters.h"

#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

__device__ bool julian(int x, int y, int nRows, int nCols, float rc, float imc, float scaleFactor)
{
    float jx = scaleFactor * ((float)(nCols / 2) - x) / (nCols / 2);
    float jy = scaleFactor * ((float)(nRows / 2) - y) / (nRows / 2);

    cudaComplex c(rc, imc);
    cudaComplex z(jx, jy);

    int iteration = 150;
    bool isFrct = true;
    for (int i = 0; i < iteration; ++i)
    {
        z = z * z + c;
        if (z.magnitude2() > 1000)
        {
            isFrct = false;
            break;
        }
    }
    return isFrct;
}

__global__ void FractalDraw(uchar* data, int nRows, int nCols, float rc, float imc, float scaleFactor)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    //
    if (x < nCols && y < nRows)
    {
        bool isFrct = julian(x, y, nRows, nCols, rc, imc, scaleFactor);
        data[offset * 3 + 0] = 120;
        data[offset * 3 + 1] = 65;
        data[offset * 3 + 2] = 255 * isFrct;
    }
}
a1b50e18364e17862257a1c4e3d48473fe5a59d9.cu
#include "Fractal.cuh" #include "device_launch_parameters.h" #include <opencv2/imgproc.hpp> #include <opencv2/highgui.hpp> __device__ bool julian(int x, int y, int nRows, int nCols, float rc, float imc, float scaleFactor) { float jx = scaleFactor * ((float)(nCols / 2) - x) / (nCols / 2); float jy = scaleFactor * ((float)(nRows / 2) - y) / (nRows / 2); cudaComplex c(rc, imc); cudaComplex z(jx, jy); int iteration = 150; bool isFrct = true; for (int i = 0; i < iteration; ++i) { z = z * z + c; if (z.magnitude2() > 1000) { isFrct = false; break; } } return isFrct; } __global__ void FractalDraw(uchar* data, int nRows, int nCols, float rc, float imc, float scaleFactor) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int offset = x + y * blockDim.x*gridDim.x; // if (x < nCols && y < nRows) { bool isFrct = julian(x, y, nRows, nCols, rc, imc, scaleFactor); data[offset * 3 + 0] = 120; data[offset * 3 + 1] = 65; data[offset * 3 + 2] = 255 * isFrct; } }
6ea4770b96047fcfa29bfcd2b275722f6967678d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////// // ParallelWaveFunction: // /////////////////////////////////////////////////////////////////////////// #include "ParallelWaveFunction.h" //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinSize, int aNbins, double aValue) { //TODO check the accuracy of this double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*aBinSize; tBinKStarMax = (i+1)*aBinSize; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(int aNbins, double aMin, double aMax, double aValue) { //TODO check the accuracy of this double tBinSize = (aMax-aMin)/aNbins; double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*tBinSize + aMin; tBinKStarMax = (i+1)*tBinSize + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinWidth, double aMin, double aMax, double aValue) { //TODO check the accuracy of this int tNbins = (aMax-aMin)/aBinWidth; double tBinKStarMin, tBinKStarMax; for(int i=0; i<tNbins; i++) { tBinKStarMin = i*aBinWidth + aMin; tBinKStarMax = (i+1)*aBinWidth + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetInterpLowBin(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = 
d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; else return tReturnBin; } //________________________________________________________________________________________________________________ __device__ double GetInterpLowBinCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { double tReturnValue; int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || 
tReturnBin >= tNbins) return -2; tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth; return tReturnValue; } //________________________________________________________________________________________________________________ __device__ hipDoubleComplex GTildeInterpolate(double aKStar, double aRStar) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsR = d_fGTildeInfo->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fGTildeInfo->binWidthK; int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar); int tBinHighK = tBinLowK+1; double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar); double tBinHighCenterK = tBinLowCenterK+tBinWidthK; double tBinWidthR = d_fGTildeInfo->binWidthR; int tBinLowR = GetInterpLowBin(kGTilde,kRaxis,aRStar); int tBinHighR = tBinLowR+1; double tBinLowCenterR = GetInterpLowBinCenter(kGTilde,kRaxis,aRStar); double tBinHighCenterR = tBinLowCenterR+tBinWidthR; //-------------------------- double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR]; double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR]; double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR]; double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR]; double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR]; double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR]; double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR]; double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR]; //-------------------------- double tD = 1.0*tBinWidthK*tBinWidthR; tResultReal = (1.0/tD)*(tQ11Real*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Real*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Real*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Real*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); //-------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } //________________________________________________________________________________________________________________ __device__ hipDoubleComplex HyperGeo1F1Interpolate(double aKStar, double aRStar, double aTheta) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsTheta = d_fHyperGeo1F1Info->nBinsTheta; int tNbinsR = d_fHyperGeo1F1Info->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fHyperGeo1F1Info->binWidthK; int tBin0K = GetInterpLowBin(kHyperGeo1F1,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kHyperGeo1F1,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; double tBinWidthR = d_fHyperGeo1F1Info->binWidthR; int tBin0R = GetInterpLowBin(kHyperGeo1F1,kRaxis,aRStar); int tBin1R = tBin0R+1; double tBin0CenterR = GetInterpLowBinCenter(kHyperGeo1F1,kRaxis,aRStar); // double tBin1CenterR = tBin0CenterR+tBinWidthR; double tBinWidthTheta = d_fHyperGeo1F1Info->binWidthTheta; int tBin0Theta = GetInterpLowBin(kHyperGeo1F1,kThetaaxis,aTheta); int tBin1Theta = tBin0Theta+1; double tBin0CenterTheta = GetInterpLowBinCenter(kHyperGeo1F1,kThetaaxis,aTheta); // double 
tBin1CenterTheta = tBin0CenterTheta+tBinWidthTheta; //-------------------------- double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; double tDiffR = (aRStar - tBin0CenterR)/tBinWidthR; double tDiffTheta = (aTheta - tBin0CenterTheta)/tBinWidthTheta; //-----------REAL--------------- //interpolate along z (i.e. theta) double tC000Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Real = tC000Real*(1.0-tDiffTheta) + tC001Real*tDiffTheta; double tC01Real = tC010Real*(1.0-tDiffTheta) + tC011Real*tDiffTheta; double tC10Real = tC100Real*(1.0-tDiffTheta) + tC101Real*tDiffTheta; double tC11Real = tC110Real*(1.0-tDiffTheta) + tC111Real*tDiffTheta; //interpolate along y (i.e. r) double tC0Real = tC00Real*(1.0-tDiffR) + tC01Real*tDiffR; double tC1Real = tC10Real*(1.0-tDiffR) + tC11Real*tDiffR; //interpolate along x (i.e. k) tResultReal = tC0Real*(1.0-tDiffK) + tC1Real*tDiffK; //-----------IMAG--------------- //interpolate along z (i.e. theta) double tC000Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Imag = tC000Imag*(1.0-tDiffTheta) + tC001Imag*tDiffTheta; double tC01Imag = tC010Imag*(1.0-tDiffTheta) + tC011Imag*tDiffTheta; double tC10Imag = tC100Imag*(1.0-tDiffTheta) + tC101Imag*tDiffTheta; double tC11Imag = tC110Imag*(1.0-tDiffTheta) + tC111Imag*tDiffTheta; //interpolate along y (i.e. r) double tC0Imag = tC00Imag*(1.0-tDiffR) + tC01Imag*tDiffR; double tC1Imag = tC10Imag*(1.0-tDiffR) + tC11Imag*tDiffR; //interpolate along x (i.e. k) tResultImag = tC0Imag*(1.0-tDiffK) + tC1Imag*tDiffK; //-------------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } /* //________________________________________________________________________________________________________________ __device__ hipDoubleComplex ScattLenInterpolateFull(double aReF0, double aImF0, double aD0, double aKStar) { //This doesn't work because d_fCoulombScatteringLengthReal and d_fCoulombScatteringLengthImag are // too big to fit onto the GPU memory. 
I am keeping it in case I figure out how to resolve the memory issue // i.e. figure out how to let the device directly access host memory double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; int tNbinsD0 = d_fScattLenInfo->nBinsD0; int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = GetInterpLowBin(kScattLen,kReF0axis,aReF0); int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = GetInterpLowBin(kScattLen,kImF0axis,aImF0); int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = GetInterpLowBin(kScattLen,kD0axis,aD0); int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0) tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0; //-------------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } */ //________________________________________________________________________________________________________________ __device__ hipDoubleComplex ScattLenInterpolate(double aReF0, double aImF0, double aD0, double aKStar) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; // int tNbinsD0 = d_fScattLenInfo->nBinsD0; // int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; int tNbinsD0 = 2; int tNbinsImF0 = 2; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = 0; int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = 0; int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = 0; int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- assert(tBin0K>=0); assert(tBin0CenterK>0); double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0) tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0; //-------------------------------- hipDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } //________________________________________________________________________________________________________________ __device__ double GetGamowFactor(double aKStar) { double d_hbarc = 0.197327; double d_gBohrRadius = 75.23349845; //TODO figure out how to load hbarc and gBohrRadius into GPU //TODO figure out how to use Pi here //TODO figure out how to make bohr radius negative when needed double tEta = pow(((aKStar/d_hbarc)*d_gBohrRadius),-1); tEta *= 6.28318530718; //eta always comes with 2Pi here double tGamow = tEta*pow((exp(tEta)-1),-1); return tGamow; } //________________________________________________________________________________________________________________ __device__ hipDoubleComplex GetExpTerm(double aKStar, double aRStar, double aTheta) { //TODO figure out how to load hbarc and gBohrRadius into GPU double d_hbarc = 0.197327; double tReal = cos((aKStar/d_hbarc)*aRStar*cos(aTheta)); double tImag = -sin((aKStar/d_hbarc)*aRStar*cos(aTheta)); hipDoubleComplex tExpTermCmplx = make_cuDoubleComplex(tReal,tImag); return tExpTermCmplx; } //________________________________________________________________________________________________________________ __device__ double AssembleWfSquared(double aRStarMag, double aGamowFactor, hipDoubleComplex aExpTermCmplx, hipDoubleComplex aGTildeCmplx, hipDoubleComplex aHyperGeo1F1Cmplx, hipDoubleComplex aScattLenCmplx) { hipDoubleComplex tGTildeCmplxConj = cuConj(aGTildeCmplx); hipDoubleComplex tScattLenCmplxConj = cuConj(aScattLenCmplx); // hipDoubleComplex tGamowFactor = make_cuDoubleComplex(aGamowFactor,0.); //cuda doesn't want to multiply double*double2 //-------------Stupid cuda can only multiple/divide two at once //TODO test to see if there is an easier way to accomplish this double tMagSq_HyperGeo1F1 = cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx); double tMagSq_ScattLen = cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj); double tMagSq_GTilde = cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj); hipDoubleComplex tTerm1 = cuCmul(aExpTermCmplx,aHyperGeo1F1Cmplx); hipDoubleComplex tTerm2 = cuCmul(tScattLenCmplxConj,tGTildeCmplxConj); hipDoubleComplex tTerm12 = cuCmul(tTerm1,tTerm2); double tTerm12Real = cuCreal(tTerm12); double tTermFinal = tTerm12Real/aRStarMag; /* hipDoubleComplex tRStarMagCmplx = make_cuDoubleComplex(aRStarMag,0.); hipDoubleComplex tTermFinalCmplx = cuCdiv(tTerm12,tRStarMagCmplx); double tTermFinal = cuCreal(tTermFinalCmplx); */ double tResult = aGamowFactor*(tMagSq_HyperGeo1F1 + tMagSq_ScattLen*tMagSq_GTilde/(aRStarMag*aRStarMag) + 2.0*tTermFinal); return tResult; /* hipDoubleComplex tResultComplex = tGamowFactor*( cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx) + cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj)*cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj)/(aRStarMag*aRStarMag) + 2.*cuCreal(aExpTermCmplx*aHyperGeo1F1Cmplx*tScattLenCmplxConj*tGTildeCmplxConj/aRStarMag) ); //TODO put in check to make sure there is no imaginary part // if(imag(tResultComplex) > std::numeric_limits< double >::min()) cout << "\t\t\t !!!!!!!!! Imaginary value in ParellelWaveFunction::InterpolateWfSquared !!!!!" 
<< endl; // assert(imag(tResultComplex) < std::numeric_limits< double >::min()); return cuCreal(tResultComplex); */ } //________________________________________________________________________________________________________________ __global__ void InterpolateWfSquared(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *aWfSquared) { //TODO figure out which thread int idx = threadIdx.x + blockIdx.x*blockDim.x; double tGamow = GetGamowFactor(aKStarMag[idx]); hipDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag[idx],aRStarMag[idx],aTheta[idx]); hipDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx; tGTildeCmplx = GTildeInterpolate(aKStarMag[idx],aRStarMag[idx]); tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag[idx],aRStarMag[idx],aTheta[idx]); tScattLenCmplx = ScattLenInterpolate(aReF0,aImF0,aD0,aKStarMag[idx]); double tResult = AssembleWfSquared(aRStarMag[idx],tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx); aWfSquared[idx] = tResult; } //________________________________________________________________________________________________________________ //**************************************************************************************************************** //________________________________________________________________________________________________________________ ParallelWaveFunction::ParallelWaveFunction(int aNThreadsPerBlock, int aNBlocks): fNThreadsPerBlock(aNThreadsPerBlock), fNBlocks(aNBlocks) { hipSetDeviceFlags(hipDeviceMapHost); } //________________________________________________________________________________________________________________ ParallelWaveFunction::~ParallelWaveFunction() { checkCudaErrors(hipFree(d_fGTildeReal)); checkCudaErrors(hipFree(d_fGTildeImag)); checkCudaErrors(hipFree(d_fGTildeInfo)); checkCudaErrors(hipFree(d_fHyperGeo1F1Real)); checkCudaErrors(hipFree(d_fHyperGeo1F1Imag)); checkCudaErrors(hipFree(d_fHyperGeo1F1Info)); // checkCudaErrors(hipFree(d_fCoulombScatteringLengthReal)); // checkCudaErrors(hipFree(d_fCoulombScatteringLengthImag)); checkCudaErrors(hipFree(d_fScattLenInfo)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeReal(td2dVec &aGTildeReal) { int tNbinsK = aGTildeReal.size(); int tNbinsR = aGTildeReal[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fGTildeReal, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeReal[tIndex] = aGTildeReal[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeImag(td2dVec &aGTildeImag) { int tNbinsK = aGTildeImag.size(); int tNbinsR = aGTildeImag[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fGTildeImag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeImag[tIndex] = aGTildeImag[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Real(td3dVec &aHyperGeo1F1Real) { int tNbinsK = aHyperGeo1F1Real.size(); int tNbinsR = aHyperGeo1F1Real[0].size(); int tNbinsTheta = aHyperGeo1F1Real[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); 
checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Real, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Real[tIndex] = aHyperGeo1F1Real[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Imag(td3dVec &aHyperGeo1F1Imag) { int tNbinsK = aHyperGeo1F1Imag.size(); int tNbinsR = aHyperGeo1F1Imag[0].size(); int tNbinsTheta = aHyperGeo1F1Imag[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Imag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Imag[tIndex] = aHyperGeo1F1Imag[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenReal(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fCoulombScatteringLengthReal, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthReal[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImag(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fCoulombScatteringLengthImag, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthImag[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenRealSub(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fScattLenRealSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenRealSubVec[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } 
//________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImagSub(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(hipMallocManaged(&d_fScattLenImagSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenImagSubVec[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenRealSub() { checkCudaErrors(hipFree(d_fScattLenRealSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenImagSub() { checkCudaErrors(hipFree(d_fScattLenImagSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeInfo(BinInfoGTilde &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde))); d_fGTildeInfo->nBinsK = aBinInfo.nBinsK; d_fGTildeInfo->nBinsR = aBinInfo.nBinsR; d_fGTildeInfo->binWidthK = aBinInfo.binWidthK; d_fGTildeInfo->binWidthR = aBinInfo.binWidthR; d_fGTildeInfo->minK = aBinInfo.minK; d_fGTildeInfo->maxK = aBinInfo.maxK; d_fGTildeInfo->minR = aBinInfo.minR; d_fGTildeInfo->maxR = aBinInfo.maxR; d_fGTildeInfo->minInterpK = aBinInfo.minInterpK; d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK; d_fGTildeInfo->minInterpR = aBinInfo.minInterpR; d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Info(BinInfoHyperGeo1F1 &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fHyperGeo1F1Info, sizeof(BinInfoHyperGeo1F1))); d_fHyperGeo1F1Info->nBinsK = aBinInfo.nBinsK; d_fHyperGeo1F1Info->nBinsR = aBinInfo.nBinsR; d_fHyperGeo1F1Info->nBinsTheta = aBinInfo.nBinsTheta; d_fHyperGeo1F1Info->binWidthK = aBinInfo.binWidthK; d_fHyperGeo1F1Info->binWidthR = aBinInfo.binWidthR; d_fHyperGeo1F1Info->binWidthTheta = aBinInfo.binWidthTheta; d_fHyperGeo1F1Info->minK = aBinInfo.minK; d_fHyperGeo1F1Info->maxK = aBinInfo.maxK; d_fHyperGeo1F1Info->minR = aBinInfo.minR; d_fHyperGeo1F1Info->maxR = aBinInfo.maxR; d_fHyperGeo1F1Info->minTheta = aBinInfo.minTheta; d_fHyperGeo1F1Info->maxTheta = aBinInfo.maxTheta; d_fHyperGeo1F1Info->minInterpK = aBinInfo.minInterpK; d_fHyperGeo1F1Info->maxInterpK = aBinInfo.maxInterpK; d_fHyperGeo1F1Info->minInterpR = aBinInfo.minInterpR; d_fHyperGeo1F1Info->maxInterpR = aBinInfo.maxInterpR; d_fHyperGeo1F1Info->minInterpTheta = aBinInfo.minInterpTheta; d_fHyperGeo1F1Info->maxInterpTheta = aBinInfo.maxInterpTheta; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenInfo(BinInfoScattLen &aBinInfo) { checkCudaErrors(hipMallocManaged(&d_fScattLenInfo, sizeof(BinInfoScattLen))); d_fScattLenInfo->nBinsReF0 = 
aBinInfo.nBinsReF0; d_fScattLenInfo->nBinsImF0 = aBinInfo.nBinsImF0; d_fScattLenInfo->nBinsD0 = aBinInfo.nBinsD0; d_fScattLenInfo->nBinsK = aBinInfo.nBinsK; d_fScattLenInfo->binWidthReF0 = aBinInfo.binWidthReF0; d_fScattLenInfo->binWidthImF0 = aBinInfo.binWidthImF0; d_fScattLenInfo->binWidthD0 = aBinInfo.binWidthD0; d_fScattLenInfo->binWidthK = aBinInfo.binWidthK; d_fScattLenInfo->minReF0 = aBinInfo.minReF0; d_fScattLenInfo->maxReF0 = aBinInfo.maxReF0; d_fScattLenInfo->minImF0 = aBinInfo.minImF0; d_fScattLenInfo->maxImF0 = aBinInfo.maxImF0; d_fScattLenInfo->minD0 = aBinInfo.minD0; d_fScattLenInfo->maxD0 = aBinInfo.maxD0; d_fScattLenInfo->minK = aBinInfo.minK; d_fScattLenInfo->maxK = aBinInfo.maxK; d_fScattLenInfo->minInterpReF0 = aBinInfo.minInterpReF0; d_fScattLenInfo->maxInterpReF0 = aBinInfo.maxInterpReF0; d_fScattLenInfo->minInterpImF0 = aBinInfo.minInterpImF0; d_fScattLenInfo->maxInterpImF0 = aBinInfo.maxInterpImF0; d_fScattLenInfo->minInterpD0 = aBinInfo.minInterpD0; d_fScattLenInfo->maxInterpD0 = aBinInfo.maxInterpD0; d_fScattLenInfo->minInterpK = aBinInfo.minInterpK; d_fScattLenInfo->maxInterpK = aBinInfo.maxInterpK; } //________________________________________________________________________________________________________________ //double* ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) vector<double> ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) { int tNPairs = aPairs.size(); int tSize = tNPairs*sizeof(double); //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_WfSquared; /* checkCudaErrors(hipHostMalloc((void**) &h_KStarMag, tSize, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc((void**) &h_RStarMag, tSize, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc((void**) &h_Theta, tSize, hipHostMallocMapped)); checkCudaErrors(hipHostMalloc((void**) &h_WfSquared, tSize, hipHostMallocMapped)); */ checkCudaErrors(hipMallocManaged(&h_KStarMag, tSize)); checkCudaErrors(hipMallocManaged(&h_RStarMag, tSize)); checkCudaErrors(hipMallocManaged(&h_Theta, tSize)); checkCudaErrors(hipMallocManaged(&h_WfSquared, tSize)); for(int i=0; i<tNPairs; i++) { h_KStarMag[i] = aPairs[i][0]; h_RStarMag[i] = aPairs[i][1]; h_Theta[i] = aPairs[i][2]; } /* //---Device arrays and allocations double * d_KStarMag; double * d_RStarMag; double * d_Theta; double * d_WfSquared; checkCudaErrors(hipHostGetDevicePointer(&d_KStarMag, h_KStarMag, 0)); checkCudaErrors(hipHostGetDevicePointer(&d_RStarMag, h_RStarMag, 0)); checkCudaErrors(hipHostGetDevicePointer(&d_Theta, h_Theta, 0)); checkCudaErrors(hipHostGetDevicePointer(&d_WfSquared, h_WfSquared, 0)); */ //----------Run the kernel----------------------------------------------- GpuTimer timer; timer.Start(); hipLaunchKernelGGL(( InterpolateWfSquared), dim3(fNBlocks),dim3(fNThreadsPerBlock), 0, 0, h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_WfSquared); timer.Stop(); std::cout << "InterpolateWfSquared kernel finished in " << timer.Elapsed() << " ms" << std::endl; //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(hipDeviceSynchronize()); /* checkCudaErrors(hipHostFree(h_KStarMag)); checkCudaErrors(hipHostFree(h_RStarMag)); checkCudaErrors(hipHostFree(h_Theta)); */ checkCudaErrors(hipFree(h_KStarMag)); checkCudaErrors(hipFree(h_RStarMag)); checkCudaErrors(hipFree(h_Theta)); // return h_WfSquared; vector<double> tReturnVec(tNPairs); for(int 
i=0; i<tNPairs; i++) { tReturnVec[i] = h_WfSquared[i]; // cout << "i = " << i << endl; // cout << "h_WfSquared[i] = " << h_WfSquared[i] << endl; // cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl; } // checkCudaErrors(hipHostFree(h_WfSquared)); checkCudaErrors(hipFree(h_WfSquared)); return tReturnVec; }
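As a standalone illustration (not code from this file) of the scheme HyperGeo1F1Interpolate uses above: given the eight tabulated values surrounding the query point and the fractional offsets from the low bin centers, the result is reduced one axis at a time, first theta, then r, then k. A plain C++ sketch of that reduction:

// c[ik][ir][itheta] holds the eight bracketing grid values;
// dk, dr, dt are the fractional distances from the low bin centers (0..1).
double trilinear(const double c[2][2][2], double dk, double dr, double dt)
{
    // collapse the theta axis
    double c00 = c[0][0][0] * (1.0 - dt) + c[0][0][1] * dt;
    double c01 = c[0][1][0] * (1.0 - dt) + c[0][1][1] * dt;
    double c10 = c[1][0][0] * (1.0 - dt) + c[1][0][1] * dt;
    double c11 = c[1][1][0] * (1.0 - dt) + c[1][1][1] * dt;
    // collapse the r axis
    double c0 = c00 * (1.0 - dr) + c01 * dr;
    double c1 = c10 * (1.0 - dr) + c11 * dr;
    // collapse the k axis
    return c0 * (1.0 - dk) + c1 * dk;
}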
6ea4770b96047fcfa29bfcd2b275722f6967678d.cu
/////////////////////////////////////////////////////////////////////////// // ParallelWaveFunction: // /////////////////////////////////////////////////////////////////////////// #include "ParallelWaveFunction.h" //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinSize, int aNbins, double aValue) { //TODO check the accuracy of this double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*aBinSize; tBinKStarMax = (i+1)*aBinSize; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(int aNbins, double aMin, double aMax, double aValue) { //TODO check the accuracy of this double tBinSize = (aMax-aMin)/aNbins; double tBinKStarMin, tBinKStarMax; for(int i=0; i<aNbins; i++) { tBinKStarMin = i*tBinSize + aMin; tBinKStarMax = (i+1)*tBinSize + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetBinNumber(double aBinWidth, double aMin, double aMax, double aValue) { //TODO check the accuracy of this int tNbins = (aMax-aMin)/aBinWidth; double tBinKStarMin, tBinKStarMax; for(int i=0; i<tNbins; i++) { tBinKStarMin = i*aBinWidth + aMin; tBinKStarMax = (i+1)*aBinWidth + aMin; if(aValue>=tBinKStarMin && aValue<tBinKStarMax) return i; } return -1; //i.e. failure } //________________________________________________________________________________________________________________ __device__ int GetInterpLowBin(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = 
d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; else return tReturnBin; } //________________________________________________________________________________________________________________ __device__ double GetInterpLowBinCenter(InterpType aInterpType, InterpAxisType aAxisType, double aVal) { double tReturnValue; int tReturnBin = -2; int tNbins, tBin; double tMin, tMax, tBinWidth, tBinCenter; bool tErrorFlag = false; switch(aInterpType) { case kGTilde: switch(aAxisType) { case kKaxis: tNbins = d_fGTildeInfo->nBinsK; tBinWidth = d_fGTildeInfo->binWidthK; tMin = d_fGTildeInfo->minK; tMax = d_fGTildeInfo->maxK; break; case kRaxis: tNbins = d_fGTildeInfo->nBinsR; tBinWidth = d_fGTildeInfo->binWidthR; tMin = d_fGTildeInfo->minR; tMax = d_fGTildeInfo->maxR; break; //Invalid axis selection case kThetaaxis: tErrorFlag = true; break; case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kHyperGeo1F1: switch(aAxisType) { case kKaxis: tNbins = d_fHyperGeo1F1Info->nBinsK; tBinWidth = d_fHyperGeo1F1Info->binWidthK; tMin = d_fHyperGeo1F1Info->minK; tMax = d_fHyperGeo1F1Info->maxK; break; case kRaxis: tNbins = d_fHyperGeo1F1Info->nBinsR; tBinWidth = d_fHyperGeo1F1Info->binWidthR; tMin = d_fHyperGeo1F1Info->minR; tMax = d_fHyperGeo1F1Info->maxR; break; case kThetaaxis: tNbins = d_fHyperGeo1F1Info->nBinsTheta; tBinWidth = d_fHyperGeo1F1Info->binWidthTheta; tMin = d_fHyperGeo1F1Info->minTheta; tMax = d_fHyperGeo1F1Info->maxTheta; break; //Invalid axis selection case kReF0axis: tErrorFlag = true; break; case kImF0axis: tErrorFlag = true; break; case kD0axis: tErrorFlag = true; break; } break; case kScattLen: switch(aAxisType) { case kReF0axis: tNbins = d_fScattLenInfo->nBinsReF0; tBinWidth = d_fScattLenInfo->binWidthReF0; tMin = d_fScattLenInfo->minReF0; tMax = d_fScattLenInfo->maxReF0; break; case kImF0axis: tNbins = d_fScattLenInfo->nBinsImF0; tBinWidth = d_fScattLenInfo->binWidthImF0; tMin = d_fScattLenInfo->minImF0; tMax = d_fScattLenInfo->maxImF0; break; case kD0axis: tNbins = d_fScattLenInfo->nBinsD0; tBinWidth = d_fScattLenInfo->binWidthD0; tMin = d_fScattLenInfo->minD0; tMax = d_fScattLenInfo->maxD0; break; case kKaxis: tNbins = d_fScattLenInfo->nBinsK; tBinWidth = d_fScattLenInfo->binWidthK; tMin = d_fScattLenInfo->minK; tMax = d_fScattLenInfo->maxK; break; //Invalid axis selection case kRaxis: tErrorFlag = true; break; case kThetaaxis: tErrorFlag = true; break; } break; } //Check error if(tErrorFlag) return -2; //--------------------------------- tBin = GetBinNumber(tNbins,tMin,tMax,aVal); tBinCenter = tMin + (tBin+0.5)*tBinWidth; if(aVal < tBinCenter) tReturnBin = tBin-1; else tReturnBin = tBin; if(tReturnBin<0 || tReturnBin >= tNbins) return -2; tReturnValue = tMin + (tReturnBin+0.5)*tBinWidth; 
return tReturnValue; } //________________________________________________________________________________________________________________ __device__ cuDoubleComplex GTildeInterpolate(double aKStar, double aRStar) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsR = d_fGTildeInfo->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fGTildeInfo->binWidthK; int tBinLowK = GetInterpLowBin(kGTilde,kKaxis,aKStar); int tBinHighK = tBinLowK+1; double tBinLowCenterK = GetInterpLowBinCenter(kGTilde,kKaxis,aKStar); double tBinHighCenterK = tBinLowCenterK+tBinWidthK; double tBinWidthR = d_fGTildeInfo->binWidthR; int tBinLowR = GetInterpLowBin(kGTilde,kRaxis,aRStar); int tBinHighR = tBinLowR+1; double tBinLowCenterR = GetInterpLowBinCenter(kGTilde,kRaxis,aRStar); double tBinHighCenterR = tBinLowCenterR+tBinWidthR; //-------------------------- double tQ11Real = d_fGTildeReal[tBinLowR + tBinLowK*tNbinsR]; double tQ12Real = d_fGTildeReal[tBinHighR + tBinLowK*tNbinsR]; double tQ21Real = d_fGTildeReal[tBinLowR + tBinHighK*tNbinsR]; double tQ22Real = d_fGTildeReal[tBinHighR + tBinHighK*tNbinsR]; double tQ11Imag = d_fGTildeImag[tBinLowR + tBinLowK*tNbinsR]; double tQ12Imag = d_fGTildeImag[tBinHighR + tBinLowK*tNbinsR]; double tQ21Imag = d_fGTildeImag[tBinLowR + tBinHighK*tNbinsR]; double tQ22Imag = d_fGTildeImag[tBinHighR + tBinHighK*tNbinsR]; //-------------------------- double tD = 1.0*tBinWidthK*tBinWidthR; tResultReal = (1.0/tD)*(tQ11Real*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Real*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Real*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Real*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); tResultImag = (1.0/tD)*(tQ11Imag*(tBinHighCenterK-aKStar)*(tBinHighCenterR-aRStar) + tQ21Imag*(aKStar-tBinLowCenterK)*(tBinHighCenterR-aRStar) + tQ12Imag*(tBinHighCenterK-aKStar)*(aRStar-tBinLowCenterR) + tQ22Imag*(aKStar-tBinLowCenterK)*(aRStar-tBinLowCenterR)); //-------------------------- cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } //________________________________________________________________________________________________________________ __device__ cuDoubleComplex HyperGeo1F1Interpolate(double aKStar, double aRStar, double aTheta) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsTheta = d_fHyperGeo1F1Info->nBinsTheta; int tNbinsR = d_fHyperGeo1F1Info->nBinsR; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthK = d_fHyperGeo1F1Info->binWidthK; int tBin0K = GetInterpLowBin(kHyperGeo1F1,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kHyperGeo1F1,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; double tBinWidthR = d_fHyperGeo1F1Info->binWidthR; int tBin0R = GetInterpLowBin(kHyperGeo1F1,kRaxis,aRStar); int tBin1R = tBin0R+1; double tBin0CenterR = GetInterpLowBinCenter(kHyperGeo1F1,kRaxis,aRStar); // double tBin1CenterR = tBin0CenterR+tBinWidthR; double tBinWidthTheta = d_fHyperGeo1F1Info->binWidthTheta; int tBin0Theta = GetInterpLowBin(kHyperGeo1F1,kThetaaxis,aTheta); int tBin1Theta = tBin0Theta+1; double tBin0CenterTheta = GetInterpLowBinCenter(kHyperGeo1F1,kThetaaxis,aTheta); // double tBin1CenterTheta = tBin0CenterTheta+tBinWidthTheta; //-------------------------- double 
tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; double tDiffR = (aRStar - tBin0CenterR)/tBinWidthR; double tDiffTheta = (aTheta - tBin0CenterTheta)/tBinWidthTheta; //-----------REAL--------------- //interpolate along z (i.e. theta) double tC000Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Real = d_fHyperGeo1F1Real[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Real = d_fHyperGeo1F1Real[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Real = tC000Real*(1.0-tDiffTheta) + tC001Real*tDiffTheta; double tC01Real = tC010Real*(1.0-tDiffTheta) + tC011Real*tDiffTheta; double tC10Real = tC100Real*(1.0-tDiffTheta) + tC101Real*tDiffTheta; double tC11Real = tC110Real*(1.0-tDiffTheta) + tC111Real*tDiffTheta; //interpolate along y (i.e. r) double tC0Real = tC00Real*(1.0-tDiffR) + tC01Real*tDiffR; double tC1Real = tC10Real*(1.0-tDiffR) + tC11Real*tDiffR; //interpolate along x (i.e. k) tResultReal = tC0Real*(1.0-tDiffK) + tC1Real*tDiffK; //-----------IMAG--------------- //interpolate along z (i.e. theta) double tC000Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC001Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC010Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC011Imag = d_fHyperGeo1F1Imag[tBin0K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC100Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin0Theta]; double tC101Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin0R*tNbinsTheta + tBin1Theta]; double tC110Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin0Theta]; double tC111Imag = d_fHyperGeo1F1Imag[tBin1K*tNbinsTheta*tNbinsR + tBin1R*tNbinsTheta + tBin1Theta]; double tC00Imag = tC000Imag*(1.0-tDiffTheta) + tC001Imag*tDiffTheta; double tC01Imag = tC010Imag*(1.0-tDiffTheta) + tC011Imag*tDiffTheta; double tC10Imag = tC100Imag*(1.0-tDiffTheta) + tC101Imag*tDiffTheta; double tC11Imag = tC110Imag*(1.0-tDiffTheta) + tC111Imag*tDiffTheta; //interpolate along y (i.e. r) double tC0Imag = tC00Imag*(1.0-tDiffR) + tC01Imag*tDiffR; double tC1Imag = tC10Imag*(1.0-tDiffR) + tC11Imag*tDiffR; //interpolate along x (i.e. k) tResultImag = tC0Imag*(1.0-tDiffK) + tC1Imag*tDiffK; //-------------------------------- cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } /* //________________________________________________________________________________________________________________ __device__ cuDoubleComplex ScattLenInterpolateFull(double aReF0, double aImF0, double aD0, double aKStar) { //This doesn't work because d_fCoulombScatteringLengthReal and d_fCoulombScatteringLengthImag are // too big to fit onto the GPU memory. I am keeping it in case I figure out how to resolve the memory issue // i.e. 
figure out how to let the device directly access host memory double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; int tNbinsD0 = d_fScattLenInfo->nBinsD0; int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = GetInterpLowBin(kScattLen,kReF0axis,aReF0); int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = GetInterpLowBin(kScattLen,kImF0axis,aImF0); int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = GetInterpLowBin(kScattLen,kD0axis,aD0); int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fCoulombScatteringLengthReal[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fCoulombScatteringLengthReal[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fCoulombScatteringLengthImag[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fCoulombScatteringLengthImag[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0) tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0; //-------------------------------- cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } */ //________________________________________________________________________________________________________________ __device__ cuDoubleComplex ScattLenInterpolate(double aReF0, double aImF0, double aD0, double aKStar) { double tResultReal = 0.; double tResultImag = 0.; //---------------------------- int tNbinsK = d_fScattLenInfo->nBinsK; // int tNbinsD0 = d_fScattLenInfo->nBinsD0; // int tNbinsImF0 = d_fScattLenInfo->nBinsImF0; int tNbinsD0 = 2; int tNbinsImF0 = 2; //---------------------------- //TODO put in check to make sure GetInterpLowBinCenter does not return the error -2 double tBinWidthReF0 = d_fScattLenInfo->binWidthReF0; int tBin0ReF0 = 0; int tBin1ReF0 = tBin0ReF0+1; double tBin0CenterReF0 = GetInterpLowBinCenter(kScattLen,kReF0axis,aReF0); // double tBin1CenterReF0 = tBin0CenterReF0+tBinWidthReF0; double tBinWidthImF0 = d_fScattLenInfo->binWidthImF0; int tBin0ImF0 = 0; int tBin1ImF0 = tBin0ImF0+1; double tBin0CenterImF0 = GetInterpLowBinCenter(kScattLen,kImF0axis,aImF0); // double tBin1CenterImF0 = tBin0CenterImF0+tBinWidthImF0; double tBinWidthD0 = d_fScattLenInfo->binWidthD0; int tBin0D0 = 0; int tBin1D0 = tBin0D0+1; double tBin0CenterD0 = GetInterpLowBinCenter(kScattLen,kD0axis,aD0); // double tBin1CenterD0 = tBin0CenterD0+tBinWidthD0; double tBinWidthK = d_fScattLenInfo->binWidthK; int tBin0K = GetInterpLowBin(kScattLen,kKaxis,aKStar); int tBin1K = tBin0K+1; double tBin0CenterK = GetInterpLowBinCenter(kScattLen,kKaxis,aKStar); // double tBin1CenterK = tBin0CenterK+tBinWidthK; //-------------------------- assert(tBin0K>=0); assert(tBin0CenterK>0); double tDiffReF0 = (aReF0 - tBin0CenterReF0)/tBinWidthReF0; double tDiffImF0 = (aImF0 - tBin0CenterImF0)/tBinWidthImF0; double tDiffD0 = (aD0 - tBin0CenterD0)/tBinWidthD0; double tDiffK = (aKStar - tBin0CenterK)/tBinWidthK; //-------------------------- //Assuming f(t,x,y,z) = f(ReF0,ImF0,D0,KStar). Ordering for memory access reasons //---------------REAL---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Real = d_fScattLenRealSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Real = d_fScattLenRealSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Real = tC0000Real*(1.0-tDiffK) + tC0001Real*tDiffK; double tC001Real = tC0010Real*(1.0-tDiffK) + tC0011Real*tDiffK; double tC010Real = tC0100Real*(1.0-tDiffK) + tC0101Real*tDiffK; double tC011Real = tC0110Real*(1.0-tDiffK) + tC0111Real*tDiffK; double tC100Real = tC1000Real*(1.0-tDiffK) + tC1001Real*tDiffK; double tC101Real = tC1010Real*(1.0-tDiffK) + tC1011Real*tDiffK; double tC110Real = tC1100Real*(1.0-tDiffK) + tC1101Real*tDiffK; double tC111Real = tC1110Real*(1.0-tDiffK) + tC1111Real*tDiffK; //interpolate along y (i.e. D0) double tC00Real = tC000Real*(1.0-tDiffD0) + tC001Real*tDiffD0; double tC01Real = tC010Real*(1.0-tDiffD0) + tC011Real*tDiffD0; double tC10Real = tC100Real*(1.0-tDiffD0) + tC101Real*tDiffD0; double tC11Real = tC110Real*(1.0-tDiffD0) + tC111Real*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Real = tC00Real*(1.0-tDiffImF0) + tC01Real*tDiffImF0; double tC1Real = tC10Real*(1.0-tDiffImF0) + tC11Real*tDiffImF0; //interpolate along t (i.e. ReF0) tResultReal = tC0Real*(1.0-tDiffReF0) + tC1Real*tDiffReF0; //---------------Imag---------------------------------- //interpolate along z (i.e. 
KStar) double tC0000Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0001Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0010Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0011Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC0100Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC0101Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC0110Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC0111Imag = d_fScattLenImagSubVec[tBin0ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1000Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1001Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1010Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1011Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin0ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; double tC1100Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin0K]; double tC1101Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin0D0*tNbinsK + tBin1K]; double tC1110Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin0K]; double tC1111Imag = d_fScattLenImagSubVec[tBin1ReF0*tNbinsImF0*tNbinsD0*tNbinsK + tBin1ImF0*tNbinsD0*tNbinsK + tBin1D0*tNbinsK + tBin1K]; //--- double tC000Imag = tC0000Imag*(1.0-tDiffK) + tC0001Imag*tDiffK; double tC001Imag = tC0010Imag*(1.0-tDiffK) + tC0011Imag*tDiffK; double tC010Imag = tC0100Imag*(1.0-tDiffK) + tC0101Imag*tDiffK; double tC011Imag = tC0110Imag*(1.0-tDiffK) + tC0111Imag*tDiffK; double tC100Imag = tC1000Imag*(1.0-tDiffK) + tC1001Imag*tDiffK; double tC101Imag = tC1010Imag*(1.0-tDiffK) + tC1011Imag*tDiffK; double tC110Imag = tC1100Imag*(1.0-tDiffK) + tC1101Imag*tDiffK; double tC111Imag = tC1110Imag*(1.0-tDiffK) + tC1111Imag*tDiffK; //interpolate along y (i.e. D0) double tC00Imag = tC000Imag*(1.0-tDiffD0) + tC001Imag*tDiffD0; double tC01Imag = tC010Imag*(1.0-tDiffD0) + tC011Imag*tDiffD0; double tC10Imag = tC100Imag*(1.0-tDiffD0) + tC101Imag*tDiffD0; double tC11Imag = tC110Imag*(1.0-tDiffD0) + tC111Imag*tDiffD0; //interpolate along x (i.e. ImF0) double tC0Imag = tC00Imag*(1.0-tDiffImF0) + tC01Imag*tDiffImF0; double tC1Imag = tC10Imag*(1.0-tDiffImF0) + tC11Imag*tDiffImF0; //interpolate along t (i.e. 
ReF0) tResultImag = tC0Imag*(1.0-tDiffReF0) + tC1Imag*tDiffReF0; //-------------------------------- cuDoubleComplex tReturnValue = make_cuDoubleComplex(tResultReal,tResultImag); return tReturnValue; } //________________________________________________________________________________________________________________ __device__ double GetGamowFactor(double aKStar) { double d_hbarc = 0.197327; double d_gBohrRadius = 75.23349845; //TODO figure out how to load hbarc and gBohrRadius into GPU //TODO figure out how to use Pi here //TODO figure out how to make bohr radius negative when needed double tEta = pow(((aKStar/d_hbarc)*d_gBohrRadius),-1); tEta *= 6.28318530718; //eta always comes with 2Pi here double tGamow = tEta*pow((exp(tEta)-1),-1); return tGamow; } //________________________________________________________________________________________________________________ __device__ cuDoubleComplex GetExpTerm(double aKStar, double aRStar, double aTheta) { //TODO figure out how to load hbarc and gBohrRadius into GPU double d_hbarc = 0.197327; double tReal = cos((aKStar/d_hbarc)*aRStar*cos(aTheta)); double tImag = -sin((aKStar/d_hbarc)*aRStar*cos(aTheta)); cuDoubleComplex tExpTermCmplx = make_cuDoubleComplex(tReal,tImag); return tExpTermCmplx; } //________________________________________________________________________________________________________________ __device__ double AssembleWfSquared(double aRStarMag, double aGamowFactor, cuDoubleComplex aExpTermCmplx, cuDoubleComplex aGTildeCmplx, cuDoubleComplex aHyperGeo1F1Cmplx, cuDoubleComplex aScattLenCmplx) { cuDoubleComplex tGTildeCmplxConj = cuConj(aGTildeCmplx); cuDoubleComplex tScattLenCmplxConj = cuConj(aScattLenCmplx); // cuDoubleComplex tGamowFactor = make_cuDoubleComplex(aGamowFactor,0.); //cuda doesn't want to multiply double*double2 //-------------Stupid cuda can only multiple/divide two at once //TODO test to see if there is an easier way to accomplish this double tMagSq_HyperGeo1F1 = cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx); double tMagSq_ScattLen = cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj); double tMagSq_GTilde = cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj); cuDoubleComplex tTerm1 = cuCmul(aExpTermCmplx,aHyperGeo1F1Cmplx); cuDoubleComplex tTerm2 = cuCmul(tScattLenCmplxConj,tGTildeCmplxConj); cuDoubleComplex tTerm12 = cuCmul(tTerm1,tTerm2); double tTerm12Real = cuCreal(tTerm12); double tTermFinal = tTerm12Real/aRStarMag; /* cuDoubleComplex tRStarMagCmplx = make_cuDoubleComplex(aRStarMag,0.); cuDoubleComplex tTermFinalCmplx = cuCdiv(tTerm12,tRStarMagCmplx); double tTermFinal = cuCreal(tTermFinalCmplx); */ double tResult = aGamowFactor*(tMagSq_HyperGeo1F1 + tMagSq_ScattLen*tMagSq_GTilde/(aRStarMag*aRStarMag) + 2.0*tTermFinal); return tResult; /* cuDoubleComplex tResultComplex = tGamowFactor*( cuCabs(aHyperGeo1F1Cmplx)*cuCabs(aHyperGeo1F1Cmplx) + cuCabs(tScattLenCmplxConj)*cuCabs(tScattLenCmplxConj)*cuCabs(tGTildeCmplxConj)*cuCabs(tGTildeCmplxConj)/(aRStarMag*aRStarMag) + 2.*cuCreal(aExpTermCmplx*aHyperGeo1F1Cmplx*tScattLenCmplxConj*tGTildeCmplxConj/aRStarMag) ); //TODO put in check to make sure there is no imaginary part // if(imag(tResultComplex) > std::numeric_limits< double >::min()) cout << "\t\t\t !!!!!!!!! Imaginary value in ParellelWaveFunction::InterpolateWfSquared !!!!!" 
<< endl; // assert(imag(tResultComplex) < std::numeric_limits< double >::min()); return cuCreal(tResultComplex); */ } //________________________________________________________________________________________________________________ __global__ void InterpolateWfSquared(double *aKStarMag, double *aRStarMag, double *aTheta, double aReF0, double aImF0, double aD0, double *aWfSquared) { //TODO figure out which thread int idx = threadIdx.x + blockIdx.x*blockDim.x; double tGamow = GetGamowFactor(aKStarMag[idx]); cuDoubleComplex tExpTermCmplx = GetExpTerm(aKStarMag[idx],aRStarMag[idx],aTheta[idx]); cuDoubleComplex tGTildeCmplx, tHyperGeo1F1Cmplx, tScattLenCmplx; tGTildeCmplx = GTildeInterpolate(aKStarMag[idx],aRStarMag[idx]); tHyperGeo1F1Cmplx = HyperGeo1F1Interpolate(aKStarMag[idx],aRStarMag[idx],aTheta[idx]); tScattLenCmplx = ScattLenInterpolate(aReF0,aImF0,aD0,aKStarMag[idx]); double tResult = AssembleWfSquared(aRStarMag[idx],tGamow,tExpTermCmplx,tGTildeCmplx,tHyperGeo1F1Cmplx,tScattLenCmplx); aWfSquared[idx] = tResult; } //________________________________________________________________________________________________________________ //**************************************************************************************************************** //________________________________________________________________________________________________________________ ParallelWaveFunction::ParallelWaveFunction(int aNThreadsPerBlock, int aNBlocks): fNThreadsPerBlock(aNThreadsPerBlock), fNBlocks(aNBlocks) { cudaSetDeviceFlags(cudaDeviceMapHost); } //________________________________________________________________________________________________________________ ParallelWaveFunction::~ParallelWaveFunction() { checkCudaErrors(cudaFree(d_fGTildeReal)); checkCudaErrors(cudaFree(d_fGTildeImag)); checkCudaErrors(cudaFree(d_fGTildeInfo)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Real)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Imag)); checkCudaErrors(cudaFree(d_fHyperGeo1F1Info)); // checkCudaErrors(cudaFree(d_fCoulombScatteringLengthReal)); // checkCudaErrors(cudaFree(d_fCoulombScatteringLengthImag)); checkCudaErrors(cudaFree(d_fScattLenInfo)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeReal(td2dVec &aGTildeReal) { int tNbinsK = aGTildeReal.size(); int tNbinsR = aGTildeReal[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fGTildeReal, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeReal[tIndex] = aGTildeReal[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeImag(td2dVec &aGTildeImag) { int tNbinsK = aGTildeImag.size(); int tNbinsR = aGTildeImag[0].size(); int tSize = tNbinsK*tNbinsR*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fGTildeImag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { tIndex = iR + iK*tNbinsR; d_fGTildeImag[tIndex] = aGTildeImag[iK][iR]; } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Real(td3dVec &aHyperGeo1F1Real) { int tNbinsK = aHyperGeo1F1Real.size(); int tNbinsR = aHyperGeo1F1Real[0].size(); int tNbinsTheta = aHyperGeo1F1Real[0][0].size(); int tSize = 
tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Real, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Real[tIndex] = aHyperGeo1F1Real[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Imag(td3dVec &aHyperGeo1F1Imag) { int tNbinsK = aHyperGeo1F1Imag.size(); int tNbinsR = aHyperGeo1F1Imag[0].size(); int tNbinsTheta = aHyperGeo1F1Imag[0][0].size(); int tSize = tNbinsK*tNbinsR*tNbinsTheta*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Imag, tSize)); int tIndex; for(int iK=0; iK<tNbinsK; iK++) { for(int iR=0; iR<tNbinsR; iR++) { for(int iTheta=0; iTheta<tNbinsTheta; iTheta++) { tIndex = iTheta + iR*tNbinsTheta + iK*tNbinsTheta*tNbinsR; d_fHyperGeo1F1Imag[tIndex] = aHyperGeo1F1Imag[iK][iR][iTheta]; } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenReal(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fCoulombScatteringLengthReal, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthReal[tIndex] = aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImag(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fCoulombScatteringLengthImag, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fCoulombScatteringLengthImag[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenRealSub(td4dVec &aScattLenReal) { int tNbinsReF0 = aScattLenReal.size(); int tNbinsImF0 = aScattLenReal[0].size(); int tNbinsD0 = aScattLenReal[0][0].size(); int tNbinsK = aScattLenReal[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fScattLenRealSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenRealSubVec[tIndex] 
= aScattLenReal[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenImagSub(td4dVec &aScattLenImag) { int tNbinsReF0 = aScattLenImag.size(); int tNbinsImF0 = aScattLenImag[0].size(); int tNbinsD0 = aScattLenImag[0][0].size(); int tNbinsK = aScattLenImag[0][0][0].size(); int tSize = tNbinsReF0*tNbinsImF0*tNbinsD0*tNbinsK*sizeof(double); checkCudaErrors(cudaMallocManaged(&d_fScattLenImagSubVec, tSize)); int tIndex; for(int iReF0=0; iReF0<tNbinsReF0; iReF0++) { for(int iImF0=0; iImF0<tNbinsImF0; iImF0++) { for(int iD0=0; iD0<tNbinsD0; iD0++) { for(int iK=0; iK<tNbinsK; iK++) { tIndex = iK + iD0*tNbinsK + iImF0*tNbinsK*tNbinsD0 + iReF0*tNbinsK*tNbinsD0*tNbinsImF0; d_fScattLenImagSubVec[tIndex] = aScattLenImag[iReF0][iImF0][iD0][iK]; } } } } } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenRealSub() { checkCudaErrors(cudaFree(d_fScattLenRealSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::UnLoadScattLenImagSub() { checkCudaErrors(cudaFree(d_fScattLenImagSubVec)); } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadGTildeInfo(BinInfoGTilde &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fGTildeInfo, sizeof(BinInfoGTilde))); d_fGTildeInfo->nBinsK = aBinInfo.nBinsK; d_fGTildeInfo->nBinsR = aBinInfo.nBinsR; d_fGTildeInfo->binWidthK = aBinInfo.binWidthK; d_fGTildeInfo->binWidthR = aBinInfo.binWidthR; d_fGTildeInfo->minK = aBinInfo.minK; d_fGTildeInfo->maxK = aBinInfo.maxK; d_fGTildeInfo->minR = aBinInfo.minR; d_fGTildeInfo->maxR = aBinInfo.maxR; d_fGTildeInfo->minInterpK = aBinInfo.minInterpK; d_fGTildeInfo->maxInterpK = aBinInfo.maxInterpK; d_fGTildeInfo->minInterpR = aBinInfo.minInterpR; d_fGTildeInfo->maxInterpR = aBinInfo.maxInterpR; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadHyperGeo1F1Info(BinInfoHyperGeo1F1 &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fHyperGeo1F1Info, sizeof(BinInfoHyperGeo1F1))); d_fHyperGeo1F1Info->nBinsK = aBinInfo.nBinsK; d_fHyperGeo1F1Info->nBinsR = aBinInfo.nBinsR; d_fHyperGeo1F1Info->nBinsTheta = aBinInfo.nBinsTheta; d_fHyperGeo1F1Info->binWidthK = aBinInfo.binWidthK; d_fHyperGeo1F1Info->binWidthR = aBinInfo.binWidthR; d_fHyperGeo1F1Info->binWidthTheta = aBinInfo.binWidthTheta; d_fHyperGeo1F1Info->minK = aBinInfo.minK; d_fHyperGeo1F1Info->maxK = aBinInfo.maxK; d_fHyperGeo1F1Info->minR = aBinInfo.minR; d_fHyperGeo1F1Info->maxR = aBinInfo.maxR; d_fHyperGeo1F1Info->minTheta = aBinInfo.minTheta; d_fHyperGeo1F1Info->maxTheta = aBinInfo.maxTheta; d_fHyperGeo1F1Info->minInterpK = aBinInfo.minInterpK; d_fHyperGeo1F1Info->maxInterpK = aBinInfo.maxInterpK; d_fHyperGeo1F1Info->minInterpR = aBinInfo.minInterpR; d_fHyperGeo1F1Info->maxInterpR = aBinInfo.maxInterpR; d_fHyperGeo1F1Info->minInterpTheta = aBinInfo.minInterpTheta; d_fHyperGeo1F1Info->maxInterpTheta = aBinInfo.maxInterpTheta; } //________________________________________________________________________________________________________________ void ParallelWaveFunction::LoadScattLenInfo(BinInfoScattLen &aBinInfo) { checkCudaErrors(cudaMallocManaged(&d_fScattLenInfo, 
sizeof(BinInfoScattLen))); d_fScattLenInfo->nBinsReF0 = aBinInfo.nBinsReF0; d_fScattLenInfo->nBinsImF0 = aBinInfo.nBinsImF0; d_fScattLenInfo->nBinsD0 = aBinInfo.nBinsD0; d_fScattLenInfo->nBinsK = aBinInfo.nBinsK; d_fScattLenInfo->binWidthReF0 = aBinInfo.binWidthReF0; d_fScattLenInfo->binWidthImF0 = aBinInfo.binWidthImF0; d_fScattLenInfo->binWidthD0 = aBinInfo.binWidthD0; d_fScattLenInfo->binWidthK = aBinInfo.binWidthK; d_fScattLenInfo->minReF0 = aBinInfo.minReF0; d_fScattLenInfo->maxReF0 = aBinInfo.maxReF0; d_fScattLenInfo->minImF0 = aBinInfo.minImF0; d_fScattLenInfo->maxImF0 = aBinInfo.maxImF0; d_fScattLenInfo->minD0 = aBinInfo.minD0; d_fScattLenInfo->maxD0 = aBinInfo.maxD0; d_fScattLenInfo->minK = aBinInfo.minK; d_fScattLenInfo->maxK = aBinInfo.maxK; d_fScattLenInfo->minInterpReF0 = aBinInfo.minInterpReF0; d_fScattLenInfo->maxInterpReF0 = aBinInfo.maxInterpReF0; d_fScattLenInfo->minInterpImF0 = aBinInfo.minInterpImF0; d_fScattLenInfo->maxInterpImF0 = aBinInfo.maxInterpImF0; d_fScattLenInfo->minInterpD0 = aBinInfo.minInterpD0; d_fScattLenInfo->maxInterpD0 = aBinInfo.maxInterpD0; d_fScattLenInfo->minInterpK = aBinInfo.minInterpK; d_fScattLenInfo->maxInterpK = aBinInfo.maxInterpK; } //________________________________________________________________________________________________________________ //double* ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) vector<double> ParallelWaveFunction::RunInterpolateWfSquared(td2dVec &aPairs, double aReF0, double aImF0, double aD0) { int tNPairs = aPairs.size(); int tSize = tNPairs*sizeof(double); //---Host arrays and allocations double * h_KStarMag; double * h_RStarMag; double * h_Theta; double * h_WfSquared; /* checkCudaErrors(cudaHostAlloc((void**) &h_KStarMag, tSize, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc((void**) &h_RStarMag, tSize, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc((void**) &h_Theta, tSize, cudaHostAllocMapped)); checkCudaErrors(cudaHostAlloc((void**) &h_WfSquared, tSize, cudaHostAllocMapped)); */ checkCudaErrors(cudaMallocManaged(&h_KStarMag, tSize)); checkCudaErrors(cudaMallocManaged(&h_RStarMag, tSize)); checkCudaErrors(cudaMallocManaged(&h_Theta, tSize)); checkCudaErrors(cudaMallocManaged(&h_WfSquared, tSize)); for(int i=0; i<tNPairs; i++) { h_KStarMag[i] = aPairs[i][0]; h_RStarMag[i] = aPairs[i][1]; h_Theta[i] = aPairs[i][2]; } /* //---Device arrays and allocations double * d_KStarMag; double * d_RStarMag; double * d_Theta; double * d_WfSquared; checkCudaErrors(cudaHostGetDevicePointer(&d_KStarMag, h_KStarMag, 0)); checkCudaErrors(cudaHostGetDevicePointer(&d_RStarMag, h_RStarMag, 0)); checkCudaErrors(cudaHostGetDevicePointer(&d_Theta, h_Theta, 0)); checkCudaErrors(cudaHostGetDevicePointer(&d_WfSquared, h_WfSquared, 0)); */ //----------Run the kernel----------------------------------------------- GpuTimer timer; timer.Start(); InterpolateWfSquared<<<fNBlocks,fNThreadsPerBlock>>>(h_KStarMag,h_RStarMag,h_Theta,aReF0,aImF0,aD0,h_WfSquared); timer.Stop(); std::cout << "InterpolateWfSquared kernel finished in " << timer.Elapsed() << " ms" << std::endl; //The following is necessary for the host to be able to "see" the changes that have been done checkCudaErrors(cudaDeviceSynchronize()); /* checkCudaErrors(cudaFreeHost(h_KStarMag)); checkCudaErrors(cudaFreeHost(h_RStarMag)); checkCudaErrors(cudaFreeHost(h_Theta)); */ checkCudaErrors(cudaFree(h_KStarMag)); checkCudaErrors(cudaFree(h_RStarMag)); checkCudaErrors(cudaFree(h_Theta)); // return h_WfSquared; 
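  // Copy the results out of the managed allocation into an ordinary std::vector,
  // so the caller receives a plain host container before h_WfSquared is freed below.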
  vector<double> tReturnVec(tNPairs);
  for(int i=0; i<tNPairs; i++)
  {
    tReturnVec[i] = h_WfSquared[i];
//    cout << "i = " << i << endl;
//    cout << "h_WfSquared[i] = " << h_WfSquared[i] << endl;
//    cout << "tReturnVec[i] = " << tReturnVec[i] << endl << endl;
  }

//  checkCudaErrors(cudaFreeHost(h_WfSquared));
  checkCudaErrors(cudaFree(h_WfSquared));

  return tReturnVec;
}
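The GTildeInterpolate and HyperGeo1F1Interpolate routines above are standard bi-/trilinear interpolations over flattened lookup tables. The following is a minimal host-side sketch of the bilinear case; Grid2D, lowBin and binCenter are hypothetical stand-ins for the d_fGTildeInfo fields and the GetInterpLowBin / GetInterpLowBinCenter device helpers, and the iR + iK*nBinsR indexing matches LoadGTildeReal.

// Minimal sketch of the bilinear scheme used by GTildeInterpolate, on the host.
#include <vector>
#include <cstdio>

struct Grid2D {
    int nBinsK, nBinsR;
    double minK, minR, binWidthK, binWidthR;
    std::vector<double> vals;   // flattened as iR + iK*nBinsR (as in LoadGTildeReal)

    int lowBin(double x, double min, double width, int nBins) const {
        int bin = static_cast<int>((x - min)/width - 0.5);   // low neighbour: bin centre <= x
        if (bin < 0) bin = 0;
        if (bin > nBins - 2) bin = nBins - 2;                // keep bin+1 in range
        return bin;
    }
    double binCenter(int bin, double min, double width) const { return min + (bin + 0.5)*width; }

    double interpolate(double aK, double aR) const {
        int k0 = lowBin(aK, minK, binWidthK, nBinsK);
        int r0 = lowBin(aR, minR, binWidthR, nBinsR);
        double kc0 = binCenter(k0, minK, binWidthK), kc1 = kc0 + binWidthK;
        double rc0 = binCenter(r0, minR, binWidthR), rc1 = rc0 + binWidthR;
        double q11 = vals[r0     + k0*nBinsR];
        double q12 = vals[(r0+1) + k0*nBinsR];
        double q21 = vals[r0     + (k0+1)*nBinsR];
        double q22 = vals[(r0+1) + (k0+1)*nBinsR];
        return ( q11*(kc1 - aK)*(rc1 - aR) + q21*(aK - kc0)*(rc1 - aR)
               + q12*(kc1 - aK)*(aR - rc0) + q22*(aK - kc0)*(aR - rc0) ) / (binWidthK*binWidthR);
    }
};

int main() {
    // 4x4 table of f(k,r) = k + 2r sampled at bin centres; bilinear interpolation is exact for it.
    Grid2D g{4, 4, 0.0, 0.0, 1.0, 1.0, std::vector<double>(16)};
    for (int iK = 0; iK < 4; ++iK)
        for (int iR = 0; iR < 4; ++iR)
            g.vals[iR + iK*4] = (iK + 0.5) + 2.0*(iR + 0.5);
    std::printf("interp(1.7, 2.3) = %f (expect %f)\n", g.interpolate(1.7, 2.3), 1.7 + 2.0*2.3);
    return 0;
}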
238e01aa40362b67ecaebad54d815861b896b4e6.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "good_addition.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *a = NULL;
            hipMalloc(&a, XSIZE*YSIZE);
            int *b = NULL;
            hipMalloc(&b, XSIZE*YSIZE);
            int *c = NULL;
            hipMalloc(&c, XSIZE*YSIZE);
            int len = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( good_addition), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, len);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( good_addition), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, len);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( good_addition), dim3(gridBlock), dim3(threadBlock), 0, 0, a, b, c, len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
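The timed loop above takes steady_clock timestamps around 1000 asynchronous kernel launches without a final hipDeviceSynchronize, so it largely measures host-side launch overhead. Below is a self-contained sketch of device-side timing with HIP events; dummy_add is a hypothetical kernel, not the good_addition kernel used above.

// Event-based timing sketch: hipEventSynchronize waits for the recorded work
// to finish, so the elapsed time reflects device execution rather than just
// the cost of queueing the launches.
#include <hip/hip_runtime.h>
#include <iostream>

__global__ void dummy_add(const int *a, const int *b, int *c, int len) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < len) c[i] = a[i] + b[i];
}

int main() {
    const int len = 1 << 20;
    int *a, *b, *c;
    hipMalloc(&a, len*sizeof(int));
    hipMalloc(&b, len*sizeof(int));
    hipMalloc(&c, len*sizeof(int));

    dim3 threadBlock(256);
    dim3 gridBlock((len + threadBlock.x - 1)/threadBlock.x);

    hipEvent_t evStart, evStop;
    hipEventCreate(&evStart);
    hipEventCreate(&evStop);

    hipEventRecord(evStart, 0);
    for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(dummy_add, gridBlock, threadBlock, 0, 0, a, b, c, len);
    }
    hipEventRecord(evStop, 0);
    hipEventSynchronize(evStop);   // wait until all 1000 launches have drained

    float msecs = 0.0f;
    hipEventElapsedTime(&msecs, evStart, evStop);
    std::cout << "1000 launches: " << msecs << " ms of device time" << std::endl;

    hipEventDestroy(evStart);
    hipEventDestroy(evStop);
    hipFree(a); hipFree(b); hipFree(c);
    return 0;
}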
238e01aa40362b67ecaebad54d815861b896b4e6.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "good_addition.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1],
                BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *a = NULL;
            cudaMalloc(&a, XSIZE*YSIZE);
            int *b = NULL;
            cudaMalloc(&b, XSIZE*YSIZE);
            int *c = NULL;
            cudaMalloc(&c, XSIZE*YSIZE);
            int len = 1;
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            good_addition<<<gridBlock,threadBlock>>>(a,b,c,len);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                good_addition<<<gridBlock,threadBlock>>>(a,b,c,len);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                good_addition<<<gridBlock,threadBlock>>>(a,b,c,len);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
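Neither version of this benchmark checks the return codes of its allocations or kernel launches, so a failed launch would silently produce a timing for an empty queue. A minimal error-checking sketch follows; the CHECK_CUDA macro and noop_kernel are hypothetical and not part of the benchmark.

// cudaGetLastError catches bad launch configurations; cudaDeviceSynchronize
// surfaces errors raised while the kernel executes.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CHECK_CUDA(call)                                                        \
    do {                                                                        \
        cudaError_t err_ = (call);                                              \
        if (err_ != cudaSuccess) {                                              \
            std::fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                         cudaGetErrorString(err_), __FILE__, __LINE__);         \
            std::exit(EXIT_FAILURE);                                            \
        }                                                                       \
    } while (0)

__global__ void noop_kernel(int *p) {
    int i = threadIdx.x;
    p[i] = i;
}

int main() {
    int *d_p = nullptr;
    CHECK_CUDA(cudaMalloc(&d_p, 256*sizeof(int)));   // size is in bytes, hence the sizeof
    noop_kernel<<<1, 256>>>(d_p);
    CHECK_CUDA(cudaGetLastError());
    CHECK_CUDA(cudaDeviceSynchronize());
    CHECK_CUDA(cudaFree(d_p));
    return 0;
}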
468b6d1679e17589b45f80b2c88fb69f8762f00c.hip
// !!! This is a file automatically generated by hipify!!! #include <cudnn.h> #include <stdio.h> #include <hip/hip_runtime.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 1 #define TC 16 #define C 32 #define N 32 #define H 14 #define W 14 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(hipError_t code) { if (code != hipSuccess) { std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[1]; __shared__ float pad_temp_shared[448]; __shared__ float kernel_shared[1024]; float pad_temp_shared_local[32]; float kernel_shared_local[32]; compute_local[(0)] = 0.000000e+00f; for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { for (int rx_outer = 0; rx_outer < 3; ++rx_outer) { __syncthreads(); pad_temp_shared[(((((int)threadIdx.z) * 14) + ((int)threadIdx.x)))] = (((((1 <= (((int)blockIdx.y) + ry_outer)) && ((((int)blockIdx.y) + ry_outer) < 15)) && (1 <= (((int)threadIdx.x) + rx_outer))) && ((((int)threadIdx.x) + rx_outer) < 15)) ? data[(((((((((int)threadIdx.z) * 196) + (((int)blockIdx.y) * 14)) + (ry_outer * 14)) + rx_outer) + ((int)threadIdx.x)) - 15))] : 0.000000e+00f); for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) { if (((((((int)threadIdx.x) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 5) + ((int)threadIdx.z)) < 32) { if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 1024) { if (((((int)threadIdx.x) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 32) { kernel_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = kernel[((((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 27)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner * 9)) + (ry_outer * 3)) + rx_outer))]; } } } } __syncthreads(); for (int ax1 = 0; ax1 < 32; ++ax1) { pad_temp_shared_local[(ax1)] = pad_temp_shared[(((ax1 * 14) + ((int)threadIdx.x)))]; } for (int ax11 = 0; ax11 < 32; ++ax11) { kernel_shared_local[(ax11)] = kernel_shared[(((((int)threadIdx.z) * 32) + ax11))]; } for (int rc_inner_inner = 0; rc_inner_inner < 32; ++rc_inner_inner) { compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(rc_inner_inner)])); } } } compute[((((((int)threadIdx.z) * 196) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)))] = compute_local[(0)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float 
*kernel; void initialize(); float *forward(float *input); }; void ConvGemm::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, 
width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ hipMalloc(&kernel,sizeof(float)*C*N*9); hipMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); hipMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { hipMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, 
&alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 
2 * WPAD + tw_id * TW + 2]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; hipMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); hipMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; hipMalloc(&device_input,C*H*W*sizeof(float)); hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; hipEvent_t event_start; hipEvent_t event_stop; hipEventCreate(&event_start); hipEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; hipMalloc(&device_out,H*W*N*sizeof(float)); hipMemset(device_out,0,H*W*N*sizeof(float)); hipMalloc(&device_K,C*N*9*sizeof(float)); hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice); hipEventRecord(event_start); convGemm.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnGemmTime; hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop); hipEventRecord(event_start); convWinogradeNon.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnWinogradeTimeNon; hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); hipEventRecord(event_start); convFFT.forward(device_input); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float cudnnFFTTime; hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(1,14,1); dim3 block(14,1,32); hipEventRecord(event_start); hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tvm; hipEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; hipMemcpy(out_tvm,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); hipMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(hipFuncSetAttribute(conv2d,hipFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); hipEventRecord(event_start); hipLaunchKernelGGL(( conv2d), dim3(TCS*THS), dim3(N * TWS), TC*(TH+2)*(WPAD)*4, 0, device_input, device_K, device_out); hipEventRecord(event_stop); hipEventSynchronize(event_stop); float time_tdc; hipEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); 
outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<< cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<endl; return 0; }
468b6d1679e17589b45f80b2c88fb69f8762f00c.cu
#include <cudnn.h> #include <stdio.h> #include <cuda.h> #include <malloc.h> #include <cstdlib> #include <time.h> #include <iostream> #include <sys/types.h> #include <errno.h> #include <vector> #include <fstream> #include <string> #include <omp.h> #define TH 1 #define TW 1 #define TC 16 #define C 32 #define N 32 #define H 14 #define W 14 #define TCS ((C-1)/TC + 1) #define THS ((H-1)/TH + 1) #define TWS ((W-1)/TW+1) #define WPAD (TWS*TW + 2) #define R 3 #define S 3 using namespace std; #define checkCUDNN(expression) \ { \ cudnnStatus_t status = (expression); \ if (status != CUDNN_STATUS_SUCCESS) { \ std::cerr << "Error on line " << __LINE__ << ": " \ << cudnnGetErrorString(status) << std::endl; \ std::exit(EXIT_FAILURE); \ } \ } inline void chkerr(cudaError_t code) { if (code != cudaSuccess) { std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl; exit(-1); } } extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) { float compute_local[1]; __shared__ float pad_temp_shared[448]; __shared__ float kernel_shared[1024]; float pad_temp_shared_local[32]; float kernel_shared_local[32]; compute_local[(0)] = 0.000000e+00f; for (int ry_outer = 0; ry_outer < 3; ++ry_outer) { for (int rx_outer = 0; rx_outer < 3; ++rx_outer) { __syncthreads(); pad_temp_shared[(((((int)threadIdx.z) * 14) + ((int)threadIdx.x)))] = (((((1 <= (((int)blockIdx.y) + ry_outer)) && ((((int)blockIdx.y) + ry_outer) < 15)) && (1 <= (((int)threadIdx.x) + rx_outer))) && ((((int)threadIdx.x) + rx_outer) < 15)) ? data[(((((((((int)threadIdx.z) * 196) + (((int)blockIdx.y) * 14)) + (ry_outer * 14)) + rx_outer) + ((int)threadIdx.x)) - 15))] : 0.000000e+00f); for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 3; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) { if (((((((int)threadIdx.x) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) >> 5) + ((int)threadIdx.z)) < 32) { if ((((((int)threadIdx.z) * 32) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 1024) { if (((((int)threadIdx.x) * 3) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 32) { kernel_shared[((((((int)threadIdx.z) * 32) + (((int)threadIdx.x) * 3)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = kernel[((((((((int)threadIdx.z) * 288) + (((int)threadIdx.x) * 27)) + (ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner * 9)) + (ry_outer * 3)) + rx_outer))]; } } } } __syncthreads(); for (int ax1 = 0; ax1 < 32; ++ax1) { pad_temp_shared_local[(ax1)] = pad_temp_shared[(((ax1 * 14) + ((int)threadIdx.x)))]; } for (int ax11 = 0; ax11 < 32; ++ax11) { kernel_shared_local[(ax11)] = kernel_shared[(((((int)threadIdx.z) * 32) + ax11))]; } for (int rc_inner_inner = 0; rc_inner_inner < 32; ++rc_inner_inner) { compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(rc_inner_inner)] * kernel_shared_local[(rc_inner_inner)])); } } } compute[((((((int)threadIdx.z) * 196) + (((int)blockIdx.y) * 14)) + ((int)threadIdx.x)))] = compute_local[(0)]; } class ConvGemm{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void 
ConvGemm::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvGemm::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvWinogradeNon{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvWinogradeNon::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, 
convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvWinogradeNon::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } class ConvFFT{ public: float *cpuKernel; float alpha = 1.0f; float beta = 0.0f; cudnnHandle_t convCudnn; void* d_workspace{nullptr}; size_t workspace_bytes{0}; cudnnTensorDescriptor_t convInputDescriptor; cudnnTensorDescriptor_t convOutputDescriptor; cudnnFilterDescriptor_t convKernelDescriptor; cudnnConvolutionDescriptor_t convDesc; float *output; float *kernel; void initialize(); float *forward(float *input); }; void ConvFFT::initialize(){ cudaMalloc(&kernel,sizeof(float)*C*N*9); cudaMalloc(&this->output,sizeof(float)*N*H*W); cudnnCreate(&convCudnn); cudnnCreateTensorDescriptor(&convInputDescriptor); cudnnSetTensor4dDescriptor(convInputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/C, /*image_height=*/H, /*image_width=*/W); cudnnCreateFilterDescriptor(&convKernelDescriptor); cudnnSetFilter4dDescriptor(convKernelDescriptor, /*dataType=*/CUDNN_DATA_FLOAT, /*format=*/CUDNN_TENSOR_NCHW, /*out_channels=*/N, /*in_channels=*/C, /*kernel_height=*/R, /*kernel_width=*/S); cudnnCreateConvolutionDescriptor(&convDesc); cudnnSetConvolution2dDescriptor(convDesc, /*pad_height=*/1, /*pad_width=*/1, /*vertical_stride=*/1, /*horizontal_stride=*/1, /*dilation_height=*/1, /*dilation_width=*/1, /*mode=*/CUDNN_CROSS_CORRELATION, CUDNN_DATA_FLOAT); int batch_size{0}, channels{0}, height{0}, width{0}; cudnnGetConvolution2dForwardOutputDim(convDesc, convInputDescriptor, convKernelDescriptor, &batch_size, &channels, &height, &width); cudnnCreateTensorDescriptor(&convOutputDescriptor); cudnnSetTensor4dDescriptor(convOutputDescriptor, /*format=*/CUDNN_TENSOR_NCHW, /*dataType=*/CUDNN_DATA_FLOAT, /*batch_size=*/1, /*channels=*/N, /*image_height=*/H, /*image_width=*/W); cudnnGetConvolutionForwardWorkspaceSize(convCudnn, convInputDescriptor, convKernelDescriptor, convDesc, convOutputDescriptor, CUDNN_CONVOLUTION_FWD_ALGO_FFT, &workspace_bytes); cudaMalloc(&d_workspace, workspace_bytes); unsigned int kernelSize = R*S*C*N;//kernel this->cpuKernel = (float *)malloc(kernelSize*sizeof(float)); for(int i=0;i<kernelSize;++i){ this->cpuKernel[i] = 1.0f; } cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice); free(cpuKernel); } float * ConvFFT::forward(float *input) { cudaMemset(output, 0, 1*N*H*W*sizeof(float)); checkCUDNN(cudnnConvolutionForward(convCudnn, &alpha, convInputDescriptor, input, 
convKernelDescriptor, kernel, convDesc, CUDNN_CONVOLUTION_FWD_ALGO_FFT, d_workspace, workspace_bytes, &beta, convOutputDescriptor, output)); return output; } __device__ void load_input_2_shared_memory(float *input, float *shared_input, unsigned int h_start, unsigned int h_end, unsigned int h_offset, unsigned int c_start, unsigned int warp_id, unsigned int lane_id, unsigned int warp_size){ switch(h_offset){ case 0: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + r * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; case 1: for(unsigned int c = warp_id; c<TC; c+=TWS){ for(unsigned int i=lane_id; i<(h_end - h_start) * W; i+=warp_size){ unsigned int r = i/W; unsigned int s = i%W; shared_input[c*(TH + 2)*(WPAD) + (1 + r) * WPAD + s + 1] = input[(c_start + c) * H * W + h_start * W + i]; } } break; } } __device__ __forceinline__ void switch_write_back(unsigned int write_h, unsigned int write_w, unsigned int h_out_start, unsigned int w_out_start, unsigned int n, float * outputs, float * temp_result){ switch(write_h){ case 1: switch(write_w){ case 1: #pragma unroll for (unsigned int th = 0; th < 1; ++th) { #pragma unroll for (unsigned int tw = 0; tw < 1; ++tw) { atomicAdd(&outputs[n*H*W+(h_out_start + th) * W+(w_out_start + tw)],temp_result[(th * TW + tw)]); } } break; } break; } } __global__ void conv2d(float * __restrict__ input,const float * __restrict__ kernel, float * __restrict__ outputs){ extern __shared__ float shared_input[]; const unsigned int tile_id = blockIdx.x; const unsigned int tc_id = tile_id / THS; const unsigned int th_id = tile_id % THS; const unsigned int tw_id = threadIdx.x / N; const int h_out_start = th_id * TH; const int w_out_start = tw_id * TW; const unsigned int warp_id = tw_id; const unsigned int lane_id = threadIdx.x % N; float data_array[9]; float temp_result[TH*TW] = {0.0f}; for(unsigned int i=threadIdx.x;i<TC*(TH+2)*WPAD;i+=blockDim.x){ shared_input[i] = 0.0f; } unsigned int n = lane_id; unsigned int c_offset = tc_id * TC; int h_offset = (h_out_start == 0)?1:0; int h_padded_start = h_out_start; int h_padded_end = min(h_padded_start + TH + 2, H + 2); int h_non_padded_start = max(h_out_start - 1, 0); int h_non_padded_end = min(H, h_padded_end - 1); __syncthreads(); load_input_2_shared_memory(input, shared_input, h_non_padded_start, h_non_padded_end, h_offset, c_offset, warp_id, lane_id, N); __syncthreads(); #pragma unroll for(unsigned int c=0;c<TC;c++){ #pragma unroll for(unsigned int r=0;r<R;++r){ #pragma unroll for(unsigned int s=0;s<S;++s){ data_array[r*S+s] = kernel[(c + c_offset)*N*9+r*3*N+s*N+n]; } } temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 0]*data_array[0]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 1]*data_array[1]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 0 * WPAD + tw_id * TW + 2]*data_array[2]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 0]*data_array[3]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 1]*data_array[4]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 1 * WPAD + tw_id * TW + 2]*data_array[5]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 0]*data_array[6]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 1]*data_array[7]; temp_result[0] += shared_input[c*(TH+2)*(WPAD) + 2 * WPAD + tw_id * TW + 
2]*data_array[8]; } switch_write_back(min(TH, H - h_out_start), min(TW, W - w_out_start), h_out_start, w_out_start, n, outputs, temp_result); } float check_diff(float *x, float *y, unsigned int size){ float diff = 0.0f; #pragma omp parallel for reduction(+ : diff) for(unsigned int i=0;i<size;++i){ diff += abs(x[i] - y[i]); } return diff; } int main(void){ float *input = new float[C*H*W]; time_t t; float *matrix; cudaMalloc(&matrix,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); cudaMemset(matrix,0,C*(TH+2)*(TW+2)*THS*TWS*sizeof(float)); srand((unsigned) time(&t)); for(int i =0;i<C*H*W;++i){ input[i] = rand() % 10; } float *device_input; cudaMalloc(&device_input,C*H*W*sizeof(float)); cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice); float *K = new float[C*N*9]; for(int i=0;i<C*N*9;++i){ K[i] = 1.0f; } ConvGemm convGemm; convGemm.initialize(); ConvWinogradeNon convWinogradeNon; convWinogradeNon.initialize(); ConvFFT convFFT; convFFT.initialize(); float *out_cudnn; float *out_cudnn_host = new float[N*H*W]; cudaEvent_t event_start; cudaEvent_t event_stop; cudaEventCreate(&event_start); cudaEventCreate(&event_stop); out_cudnn = convGemm.forward(device_input); cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); out_cudnn = convFFT.forward(device_input); out_cudnn = convWinogradeNon.forward(device_input); float *device_K; float *device_out; cudaMalloc(&device_out,H*W*N*sizeof(float)); cudaMemset(device_out,0,H*W*N*sizeof(float)); cudaMalloc(&device_K,C*N*9*sizeof(float)); cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(event_start); convGemm.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnGemmTime; cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop); cudaEventRecord(event_start); convWinogradeNon.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnWinogradeTimeNon; cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop); cudaEventRecord(event_start); convFFT.forward(device_input); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float cudnnFFTTime; cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop); dim3 grid(1,14,1); dim3 block(14,1,32); cudaEventRecord(event_start); default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tvm; cudaEventElapsedTime(&time_tvm, event_start, event_stop); float *out_tvm = new float[N*H*W]; cudaMemcpy(out_tvm,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); cudaMemset(device_out, 0, sizeof(float)*N*H*W); chkerr(cudaFuncSetAttribute(conv2d,cudaFuncAttributeMaxDynamicSharedMemorySize, TC*(TH+2)*(WPAD)*4)); cudaEventRecord(event_start); conv2d<<<TCS*THS, N * TWS, TC*(TH+2)*(WPAD)*4>>>(device_input, device_K, device_out); cudaEventRecord(event_stop); cudaEventSynchronize(event_stop); float time_tdc; cudaEventElapsedTime(&time_tdc, event_start, event_stop); float *out_tdc = new float[N*H*W]; cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost); ofstream outfile; char buffer[1000]; int ret = sprintf(buffer,"%d,%d,%d,%d,%f,%f,%f,%f,%f,%f,%f,%f,%f\n",N,C,H,W, cudnnFFTTime,cudnnWinogradeTimeNon,cudnnGemmTime,time_tvm,time_tdc, cudnnFFTTime/time_tdc,cudnnWinogradeTimeNon/time_tdc,cudnnGemmTime/time_tdc,time_tvm/time_tdc); outfile.open("../../evaluation_outcome/A100-layers-eval-modeling.csv", std::ios_base::app); outfile << buffer; float 
difference = check_diff(out_tvm, out_tdc, N*H*W); cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<< time_tvm<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<< cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<","<<time_tvm/time_tdc<<","<<difference<<endl; return 0; }
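// ----------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the original file): each
// convolution variant above is timed with the same event pattern — record a
// start event, enqueue the work, record a stop event, synchronize on it, then
// read the elapsed time. A small helper such as the hypothetical one below
// captures that pattern once; the callable passed in is assumed to enqueue all
// GPU work on the default stream, as the benchmark does.
#include <cuda_runtime.h>

template <typename F>
static float time_on_gpu_ms(F &&work) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);       // marker before the work
    work();                       // enqueue kernels / cuDNN calls
    cudaEventRecord(stop);        // marker after the work
    cudaEventSynchronize(stop);   // wait for the stop marker to complete
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}
// Usage (hypothetical): float t = time_on_gpu_ms([&] { convGemm.forward(device_input); });
// ----------------------------------------------------------------------------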
e9d9865e067acc8237d593a5318c9cbdd0b8709a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_4_b; int xdim0_update_halo_kernel4_plus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_4_b; int ydim0_update_halo_kernel4_plus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_4_b; int xdim1_update_halo_kernel4_plus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_4_b; int ydim1_update_halo_kernel4_plus_4_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_4_b * (y) + \ xdim0_update_halo_kernel4_plus_4_b * ydim0_update_halo_kernel4_plus_4_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_4_b * (y) + \ xdim1_update_halo_kernel4_plus_4_b * ydim1_update_halo_kernel4_plus_4_b * \ (z)) // user function __device__ inline void update_halo_kernel4_plus_4_b(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-4, 0, 0)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-4, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_4_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_b + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_b * ydim0_update_halo_kernel4_plus_4_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_b + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_b * ydim1_update_halo_kernel4_plus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_4_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel4_plus_4_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 122)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(122, "update_halo_kernel4_plus_4_b"); OPS_kernels[122].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; 
if (xdim0 != xdim0_update_halo_kernel4_plus_4_b_h || ydim0 != ydim0_update_halo_kernel4_plus_4_b_h || xdim1 != xdim1_update_halo_kernel4_plus_4_b_h || ydim1 != ydim1_update_halo_kernel4_plus_4_b_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_4_b_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_4_b_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_4_b_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[122].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_4_b), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[122].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[122].mpi_time += t2 - t1; OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
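// ----------------------------------------------------------------------------
// Editor's note (hedged sketch, not from the OPS code generator): the host stub
// above shadows every __constant__ extent with a *_h host copy and calls
// hipMemcpyToSymbol only when the dat size actually changes, avoiding a
// host-to-device copy on every loop invocation. Stripped of the OPS
// bookkeeping, that caching pattern looks roughly like the snippet below; all
// names are illustrative. The generated stub passes scalar symbols directly as
// emitted by hipify; the HIP_SYMBOL() wrapper used here is the form documented
// by HIP.
#include <hip/hip_runtime.h>

__constant__ int c_dims[2];          // {xdim, ydim} consumed by the kernels
static int c_dims_h[2] = {-1, -1};   // host-side shadow of the last upload

static void set_dims_if_changed(int xdim, int ydim) {
    if (xdim != c_dims_h[0] || ydim != c_dims_h[1]) {   // skip redundant uploads
        int dims[2] = {xdim, ydim};
        hipMemcpyToSymbol(HIP_SYMBOL(c_dims), dims, sizeof(dims));
        c_dims_h[0] = xdim;
        c_dims_h[1] = ydim;
    }
}
// ----------------------------------------------------------------------------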
e9d9865e067acc8237d593a5318c9cbdd0b8709a.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_4_b; int xdim0_update_halo_kernel4_plus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_4_b; int ydim0_update_halo_kernel4_plus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_4_b; int xdim1_update_halo_kernel4_plus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_4_b; int ydim1_update_halo_kernel4_plus_4_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel4_plus_4_b * (y) + \ xdim0_update_halo_kernel4_plus_4_b * ydim0_update_halo_kernel4_plus_4_b * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel4_plus_4_b * (y) + \ xdim1_update_halo_kernel4_plus_4_b * ydim1_update_halo_kernel4_plus_4_b * \ (z)) // user function __device__ inline void update_halo_kernel4_plus_4_b(double *vol_flux_y, double *mass_flux_y, const int *fields) { if (fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0, 0, 0)] = vol_flux_y[OPS_ACC0(-4, 0, 0)]; if (fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0, 0, 0)] = mass_flux_y[OPS_ACC1(-4, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_4_b(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel4_plus_4_b + idx_z * 1 * 1 * xdim0_update_halo_kernel4_plus_4_b * ydim0_update_halo_kernel4_plus_4_b; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel4_plus_4_b + idx_z * 1 * 1 * xdim1_update_halo_kernel4_plus_4_b * ydim1_update_halo_kernel4_plus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_4_b(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel4_plus_4_b(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 122)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(122, "update_halo_kernel4_plus_4_b"); OPS_kernels[122].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_4_b_h || ydim0 != 
ydim0_update_halo_kernel4_plus_4_b_h || xdim1 != xdim1_update_halo_kernel4_plus_4_b_h || ydim1 != ydim1_update_halo_kernel4_plus_4_b_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel4_plus_4_b, &xdim0, sizeof(int)); xdim0_update_halo_kernel4_plus_4_b_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel4_plus_4_b, &ydim0, sizeof(int)); ydim0_update_halo_kernel4_plus_4_b_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel4_plus_4_b, &xdim1, sizeof(int)); xdim1_update_halo_kernel4_plus_4_b_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel4_plus_4_b, &ydim1, sizeof(int)); ydim1_update_halo_kernel4_plus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[122].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel4_plus_4_b<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[122].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[122].mpi_time += t2 - t1; OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[122].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
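// ----------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated file): the stub
// above sizes its launch with the usual ceiling division — one extra block when
// the range is not a multiple of the block size — and the kernel re-checks
// idx_* < size* so the padding threads do nothing. The standalone guard
// pattern, with hypothetical names (assumes nx, ny, nz >= 1):
#include <cuda_runtime.h>

__global__ void touch3d(double *a, int nx, int ny, int nz) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x < nx && y < ny && z < nz)                      // guard against padding threads
        a[(size_t)z * ny * nx + (size_t)y * nx + x] += 1.0;
}

static dim3 grid_for(int nx, int ny, int nz, dim3 block) {
    return dim3((nx - 1) / block.x + 1,                  // ceil(nx / block.x)
                (ny - 1) / block.y + 1,
                (nz - 1) / block.z + 1);
}
// Launch (illustrative): touch3d<<<grid_for(nx, ny, nz, block), block>>>(d_a, nx, ny, nz);
// ----------------------------------------------------------------------------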
d5960674fe5b0adaceac431e994f9d0f20d5d961.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #define N 22528*22528 // 22k ^ 2 = 507510784 #define THREAD_PER_BLOCK 1024 #define GRID N/THREAD_PER_BLOCK __global__ void outputFromGPU(int *arr, int *min, int *max, int *sum, int *mutex) { //GPU int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; int offset = stride; __shared__ int s_min[THREAD_PER_BLOCK]; __shared__ int s_max[THREAD_PER_BLOCK]; __shared__ int s_sum[THREAD_PER_BLOCK]; int _min = arr[index]; int _max = arr[index]; int _sum = arr[index]; while(index + offset < N) { _min = (_min > arr[index + offset])?arr[index + offset]:_min; _max = (_max < arr[index + offset])?arr[index + offset]:_max; _sum += arr[index + offset]; offset += stride; } s_min[threadIdx.x] = _min; s_max[threadIdx.x] = _max; s_sum[threadIdx.x] = _sum; __syncthreads(); int i = blockDim.x / 2; while(i != 0) { if(threadIdx.x < i) { s_min[threadIdx.x] = (s_min[threadIdx.x] > s_min[threadIdx.x + i])?s_min[threadIdx.x + i]:s_min[threadIdx.x]; s_max[threadIdx.x] = (s_max[threadIdx.x] < s_max[threadIdx.x + i])?s_max[threadIdx.x + i]:s_max[threadIdx.x]; s_sum[threadIdx.x] += s_sum[threadIdx.x + i]; } __syncthreads(); i /= 2; } if(threadIdx.x == 0) { while(atomicCAS(mutex, 0, 1) != 0); *min = (*min > s_min[0])?s_min[0]:*min; *max = (*max < s_max[0])?s_max[0]:*max; *sum += s_sum[0]; atomicExch(mutex, 0); } } __host__ void get_min_max(int *arr, int *min, int *max, int *sum) { //CPU *min = arr[0]; *max = arr[0]; *sum = arr[0]; for(int i = 1; i < N; i++) { *min = (*min > arr[i])?*min = arr[i]:*min; *max = (*max < arr[i])?*max = arr[i]:*max; *sum += arr[i]; } } __host__ void get_min_max_advance(int *arr, int *min, int *max, int *sum) { //CPU int t; int stride = N/2; t = (arr[0] > arr[stride])?arr[stride]:arr[0]; *min = (*min > t)?*min = t:*min; t = (arr[0] < arr[stride])?arr[stride]:arr[0]; *max = (*max < t)?*max = t:*max; *sum = arr[0]+arr[stride]; for(int i = 1; i < stride; i++) { t = (arr[i] > arr[i+stride])?arr[i+stride]:arr[i]; *min = (*min > t)?*min = t:*min; t = (arr[i] < arr[i+stride])?arr[i+stride]:arr[i]; *max = (*max < t)?*max = t:*max; *sum += arr[i]+arr[i+stride]; } } int main(void) { printf(":: Ex4 ::\n\n"); int *arr, *d_arr, *h_min, *h_max, *h_sum, *d_min, *d_max, *d_sum, *d_mutex; int i, min, max, sum; clock_t begin, end; float timeGPU, timeCPU; int size = sizeof(int); arr = (int*)malloc(size * N); h_min = (int*)malloc(size); h_max = (int*)malloc(size); h_sum = (int*)malloc(size); hipMalloc((void**)&d_arr, size * N); hipMalloc((void**)&d_min, size); hipMalloc((void**)&d_max, size); hipMalloc((void**)&d_sum, size); hipMalloc((void**)&d_mutex, size); printf("Initializing... 
please wait.\n\n"); srand (time(NULL)); for(i = 0; i < N; i++){arr[i] = rand()%20000;} printf("GPU Parallel Algorithm...\n"); *h_min = 20000; *h_max = 0; *h_sum = 0; begin = clock(); hipMemcpy(d_arr, arr, size * N, hipMemcpyHostToDevice); hipMemcpy(d_min, h_min, size, hipMemcpyHostToDevice); hipMemcpy(d_max, h_max, size, hipMemcpyHostToDevice); hipMemcpy(d_sum, h_sum, size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( outputFromGPU), dim3(GRID), dim3(THREAD_PER_BLOCK), 0, 0, d_arr, d_min, d_max, d_sum, d_mutex); hipDeviceSynchronize(); hipMemcpy(h_min, d_min, size, hipMemcpyDeviceToHost); hipMemcpy(h_max, d_max, size, hipMemcpyDeviceToHost); hipMemcpy(h_sum, d_sum, size, hipMemcpyDeviceToHost); end = clock(); timeGPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(GPU) : time %f sec. : min: %d, max: %d, sum: %d\n\n", timeGPU, *h_min, *h_max, *h_sum); printf("CPU Basic Algorithm...\n"); begin = clock(); get_min_max(arr, &min, &max, &sum); end = clock(); timeCPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(CPU) : time %f sec. : min: %d, max: %d, sum: %d\n\n", timeCPU, min, max, sum); printf("CPU Advance Algorithm...\n"); begin = clock(); get_min_max_advance(arr, &min, &max, &sum); end = clock(); timeCPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(CPU) : time %f sec. : min: %d, max: %d, sum: %d\n", timeCPU, min, max, sum); hipFree(d_arr); hipFree(d_min); hipFree(d_max); hipFree(d_sum); hipFree(d_mutex); free(arr); free(h_min); free(h_max); free(h_sum); return 0; }
d5960674fe5b0adaceac431e994f9d0f20d5d961.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #define N 22528*22528 // 22k ^ 2 = 507510784 #define THREAD_PER_BLOCK 1024 #define GRID N/THREAD_PER_BLOCK __global__ void outputFromGPU(int *arr, int *min, int *max, int *sum, int *mutex) { //GPU int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; int offset = stride; __shared__ int s_min[THREAD_PER_BLOCK]; __shared__ int s_max[THREAD_PER_BLOCK]; __shared__ int s_sum[THREAD_PER_BLOCK]; int _min = arr[index]; int _max = arr[index]; int _sum = arr[index]; while(index + offset < N) { _min = (_min > arr[index + offset])?arr[index + offset]:_min; _max = (_max < arr[index + offset])?arr[index + offset]:_max; _sum += arr[index + offset]; offset += stride; } s_min[threadIdx.x] = _min; s_max[threadIdx.x] = _max; s_sum[threadIdx.x] = _sum; __syncthreads(); int i = blockDim.x / 2; while(i != 0) { if(threadIdx.x < i) { s_min[threadIdx.x] = (s_min[threadIdx.x] > s_min[threadIdx.x + i])?s_min[threadIdx.x + i]:s_min[threadIdx.x]; s_max[threadIdx.x] = (s_max[threadIdx.x] < s_max[threadIdx.x + i])?s_max[threadIdx.x + i]:s_max[threadIdx.x]; s_sum[threadIdx.x] += s_sum[threadIdx.x + i]; } __syncthreads(); i /= 2; } if(threadIdx.x == 0) { while(atomicCAS(mutex, 0, 1) != 0); *min = (*min > s_min[0])?s_min[0]:*min; *max = (*max < s_max[0])?s_max[0]:*max; *sum += s_sum[0]; atomicExch(mutex, 0); } } __host__ void get_min_max(int *arr, int *min, int *max, int *sum) { //CPU *min = arr[0]; *max = arr[0]; *sum = arr[0]; for(int i = 1; i < N; i++) { *min = (*min > arr[i])?*min = arr[i]:*min; *max = (*max < arr[i])?*max = arr[i]:*max; *sum += arr[i]; } } __host__ void get_min_max_advance(int *arr, int *min, int *max, int *sum) { //CPU int t; int stride = N/2; t = (arr[0] > arr[stride])?arr[stride]:arr[0]; *min = (*min > t)?*min = t:*min; t = (arr[0] < arr[stride])?arr[stride]:arr[0]; *max = (*max < t)?*max = t:*max; *sum = arr[0]+arr[stride]; for(int i = 1; i < stride; i++) { t = (arr[i] > arr[i+stride])?arr[i+stride]:arr[i]; *min = (*min > t)?*min = t:*min; t = (arr[i] < arr[i+stride])?arr[i+stride]:arr[i]; *max = (*max < t)?*max = t:*max; *sum += arr[i]+arr[i+stride]; } } int main(void) { printf(":: Ex4 ::\n\n"); int *arr, *d_arr, *h_min, *h_max, *h_sum, *d_min, *d_max, *d_sum, *d_mutex; int i, min, max, sum; clock_t begin, end; float timeGPU, timeCPU; int size = sizeof(int); arr = (int*)malloc(size * N); h_min = (int*)malloc(size); h_max = (int*)malloc(size); h_sum = (int*)malloc(size); cudaMalloc((void**)&d_arr, size * N); cudaMalloc((void**)&d_min, size); cudaMalloc((void**)&d_max, size); cudaMalloc((void**)&d_sum, size); cudaMalloc((void**)&d_mutex, size); printf("Initializing... please wait.\n\n"); srand (time(NULL)); for(i = 0; i < N; i++){arr[i] = rand()%20000;} printf("GPU Parallel Algorithm...\n"); *h_min = 20000; *h_max = 0; *h_sum = 0; begin = clock(); cudaMemcpy(d_arr, arr, size * N, cudaMemcpyHostToDevice); cudaMemcpy(d_min, h_min, size, cudaMemcpyHostToDevice); cudaMemcpy(d_max, h_max, size, cudaMemcpyHostToDevice); cudaMemcpy(d_sum, h_sum, size, cudaMemcpyHostToDevice); outputFromGPU<<<GRID, THREAD_PER_BLOCK>>>(d_arr, d_min, d_max, d_sum, d_mutex); cudaDeviceSynchronize(); cudaMemcpy(h_min, d_min, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_max, d_max, size, cudaMemcpyDeviceToHost); cudaMemcpy(h_sum, d_sum, size, cudaMemcpyDeviceToHost); end = clock(); timeGPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(GPU) : time %f sec. 
: min: %d, max: %d, sum: %d\n\n", timeGPU, *h_min, *h_max, *h_sum); printf("CPU Basic Algorithm...\n"); begin = clock(); get_min_max(arr, &min, &max, &sum); end = clock(); timeCPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(CPU) : time %f sec. : min: %d, max: %d, sum: %d\n\n", timeCPU, min, max, sum); printf("CPU Advance Algorithm...\n"); begin = clock(); get_min_max_advance(arr, &min, &max, &sum); end = clock(); timeCPU = (float)(end-begin)/CLOCKS_PER_SEC; printf("(CPU) : time %f sec. : min: %d, max: %d, sum: %d\n", timeCPU, min, max, sum); cudaFree(d_arr); cudaFree(d_min); cudaFree(d_max); cudaFree(d_sum); cudaFree(d_mutex); free(arr); free(h_min); free(h_max); free(h_sum); return 0; }
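// ----------------------------------------------------------------------------
// Editor's note (alternative, not the author's code): the kernel above combines
// per-block results under an atomicCAS spin-lock. For plain min/max/sum the
// dedicated integer atomics can replace the lock entirely; a hedged sketch of
// that alternative follows, where b_min/b_max/b_sum stand for s_min[0],
// s_max[0], s_sum[0] as computed by thread 0 of each block.
__device__ void combine_block_result(int *g_min, int *g_max, int *g_sum,
                                     int b_min, int b_max, int b_sum) {
    atomicMin(g_min, b_min);   // no lock needed for these int atomics
    atomicMax(g_max, b_max);
    atomicAdd(g_sum, b_sum);
}
// ----------------------------------------------------------------------------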
f39da35c83f362004e9050b1487b6293dddcf4a0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { //fang:group_ BaseConvolutionLayer group_, group // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( sync_conv_groups), dim3(1), dim3(1), 0, 0, ); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
f39da35c83f362004e9050b1487b6293dddcf4a0.cu
#ifdef USE_CUDNN #include <vector> #include "caffe/layers/cudnn_conv_layer.hpp" namespace caffe { __global__ void sync_conv_groups() { } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* weight = this->blobs_[0]->gpu_data(); for (int i = 0; i < bottom.size(); ++i) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* top_data = top[i]->mutable_gpu_data(); // Forward through cuDNN in parallel over groups. for (int g = 0; g < this->group_; g++) { //fang:group_ 是继承自 BaseConvolutionLayer 的group_,即卷积参数 group // Filters. CUDNN_CHECK(cudnnConvolutionForward(handle_[g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, filter_desc_, weight + this->weight_offset_ * g, conv_descs_[i], fwd_algo_[i], workspace[g], workspace_fwd_sizes_[i], cudnn::dataType<Dtype>::zero, top_descs_[i], top_data + top_offset_ * g)); // Bias. if (this->bias_term_) { const Dtype* bias_data = this->blobs_[1]->gpu_data(); CUDNN_CHECK(cudnnAddTensor(handle_[g], cudnn::dataType<Dtype>::one, bias_desc_, bias_data + bias_offset_ * g, cudnn::dataType<Dtype>::one, top_descs_[i], top_data + top_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } template <typename Dtype> void CuDNNConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* weight = NULL; Dtype* weight_diff = NULL; if (this->param_propagate_down_[0]) { weight = this->blobs_[0]->gpu_data(); weight_diff = this->blobs_[0]->mutable_gpu_diff(); } Dtype* bias_diff = NULL; if (this->bias_term_ && this->param_propagate_down_[1]) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); } for (int i = 0; i < top.size(); ++i) { const Dtype* top_diff = top[i]->gpu_diff(); // Backward through cuDNN in parallel over groups and gradients. for (int g = 0; g < this->group_; g++) { // Gradient w.r.t. bias. if (this->bias_term_ && this->param_propagate_down_[1]) { CUDNN_CHECK(cudnnConvolutionBackwardBias(handle_[0*this->group_ + g], cudnn::dataType<Dtype>::one, top_descs_[i], top_diff + top_offset_ * g, cudnn::dataType<Dtype>::one, bias_desc_, bias_diff + bias_offset_ * g)); } // Gradient w.r.t. weights. if (this->param_propagate_down_[0]) { const Dtype* bottom_data = bottom[i]->gpu_data(); CUDNN_CHECK(cudnnConvolutionBackwardFilter( handle_[1*this->group_ + g], cudnn::dataType<Dtype>::one, bottom_descs_[i], bottom_data + bottom_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_filter_algo_[i], workspace[1*this->group_ + g], workspace_bwd_filter_sizes_[i], cudnn::dataType<Dtype>::one, filter_desc_, weight_diff + this->weight_offset_ * g)); } // Gradient w.r.t. bottom data. 
if (propagate_down[i]) { if (weight == NULL) { weight = this->blobs_[0]->gpu_data(); } Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); CUDNN_CHECK(cudnnConvolutionBackwardData( handle_[2*this->group_ + g], cudnn::dataType<Dtype>::one, filter_desc_, weight + this->weight_offset_ * g, top_descs_[i], top_diff + top_offset_ * g, conv_descs_[i], bwd_data_algo_[i], workspace[2*this->group_ + g], workspace_bwd_data_sizes_[i], cudnn::dataType<Dtype>::zero, bottom_descs_[i], bottom_diff + bottom_offset_ * g)); } } // Synchronize the work across groups, each of which went into its own // stream, by launching an empty kernel into the default (null) stream. // NOLINT_NEXT_LINE(whitespace/operators) sync_conv_groups<<<1, 1>>>(); } } INSTANTIATE_LAYER_GPU_FUNCS(CuDNNConvolutionLayer); } // namespace caffe #endif
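// ----------------------------------------------------------------------------
// Editor's note (hedged sketch, not part of the Caffe source): the empty
// sync_conv_groups<<<1, 1>>>() launch above acts as a join point because the
// legacy default (null) stream synchronizes with all other blocking streams:
// it waits for their queued work, and their later work waits for it. The
// standalone program below illustrates that idiom with illustrative names; it
// assumes the legacy default stream (i.e. not --default-stream=per-thread).
#include <cuda_runtime.h>

__global__ void noop() {}
__global__ void add_one(float *p) { p[threadIdx.x] += 1.0f; }

int main() {
    float *d;
    cudaMalloc(&d, 256 * sizeof(float));
    cudaMemset(d, 0, 256 * sizeof(float));
    cudaStream_t s[2];
    for (int i = 0; i < 2; ++i) cudaStreamCreate(&s[i]);          // blocking streams
    for (int i = 0; i < 2; ++i) add_one<<<1, 128, 0, s[i]>>>(d + i * 128);
    noop<<<1, 1>>>();   // null-stream launch: runs after both add_one kernels,
                        // and later work on s[0]/s[1] runs after it
    cudaDeviceSynchronize();
    for (int i = 0; i < 2; ++i) cudaStreamDestroy(s[i]);
    cudaFree(d);
    return 0;
}
// ----------------------------------------------------------------------------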
98c9ee512158d0e7621ca5d29b2b9b2472504c3e.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/matrix_inverse.hpp> #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #include "efficient.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "radix.h" #include <fstream> //Toggles Defines #include "Setting_defines.h" //Results file extern FILE *fp; #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc static PathSegment * dev_cache_paths = NULL; static ShadeableIntersection * dev_cache_intersections = NULL; static int * dev_flag_array = NULL; static Vertex * dev_vertices = NULL; static int * dev_lights_indices = NULL; // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need //Cache hipMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); hipMalloc(&dev_flag_array, pixelcount * sizeof(int)); //Smooth Normals: //hst_scene->Smooth_Normals(); hipMalloc(&dev_vertices, hst_scene->vertices.size() * sizeof(Vertex)); hipMemcpy(dev_vertices, hst_scene->vertices.data(), hst_scene->vertices.size() * sizeof(Vertex), hipMemcpyHostToDevice); hipMalloc(&dev_lights_indices, hst_scene->lights_indices.size() * sizeof(int)); hipMemcpy(dev_lights_indices, hst_scene->lights_indices.data(), hst_scene->lights_indices.size() * sizeof(int), hipMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_materials); hipFree(dev_intersections); // TODO: clean up any extra device memory you created hipFree(dev_cache_paths); hipFree(dev_cache_intersections); hipFree(dev_flag_array); hipFree(dev_lights_indices); checkCUDAError("pathtraceFree"); } //Reference: PBRT source code https://www.dartdocs.org/documentation/dartray/0.0.1/core/ConcentricSampleDisk.html //ConcentricSampleDisk __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) { float r, theta; float a, b; // Map uniform random numbers to $[-1,1]^2$ float sx = 2 * u1 - 1; float sy = 2 * u2 - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { // Handle first region of disk r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { // Handle second region of disk r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { // Handle third region of disk r = -sx; theta = 4.0f - sy / r; } else { // Handle fourth region of disk r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; a = r * cosf(theta); b = r * sinf(theta); glm::vec2 returnValue(a, b); return returnValue; } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); segment.is_terminated = false; thrust::default_random_engine rng = makeSeededRandomEngine(iter, x + y, 0); thrust::uniform_real_distribution<float> u01(0, 1); // TODO: implement antialiasing by jittering the ray //Stochastic Sampling #if AntiAliasing_Toggle segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + u01(rng)) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + u01(rng)) - (float)cam.resolution.y * 0.5f) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif //Depth of Field #if Depth_Of_Field_Toggle float u1 = u01(rng), u2 = u01(rng); glm::vec2 pLens = cam.lensRadius * ConcentricSampleDisk(u1, u2); glm::vec3 pFocus = segment.ray.origin + glm::abs(cam.focalLength / segment.ray.direction.z) * segment.ray.direction; segment.ray.origin += pLens.x * cam.right + pLens.y * cam.up; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #endif segment.rand_time = u01(rng); segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __host__ __device__ glm::mat4 buildTransformationMatrix(glm::vec3 translation, glm::vec3 rotation, glm::vec3 scale) { glm::mat4 translationMat = glm::translate(glm::mat4(), translation); glm::mat4 rotationMat = glm::rotate(glm::mat4(), rotation.x * (float)PI / 180, glm::vec3(1, 0, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.y * (float)PI / 180, glm::vec3(0, 1, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.z * (float)PI / 180, glm::vec3(0, 0, 1)); glm::mat4 scaleMat = glm::scale(glm::mat4(), scale); return translationMat * rotationMat * scaleMat; } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
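// --- Illustrative sketch (not part of the original file) ---------------------
// generateRayFromCamera above builds every primary ray as
//   dir = normalize(view - right*pixelLength.x*(x - w/2) - up*pixelLength.y*(y - h/2)).
// The same pinhole mapping isolated into a standalone helper, with hypothetical
// parameter names, purely for reference:
__host__ __device__ inline glm::vec3 pinholeDirection(
    const glm::vec3& view, const glm::vec3& right, const glm::vec3& up,
    const glm::vec2& pixelLength, const glm::ivec2& resolution, float x, float y) {
    return glm::normalize(view
        - right * pixelLength.x * (x - (float)resolution.x * 0.5f)
        - up    * pixelLength.y * (y - (float)resolution.y * 0.5f));
}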
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections , Vertex *vertices ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; bool outside = true; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; #if MotionBlur_Toggle glm::vec3 blur_pos = glm::clamp((1 - pathSegment.rand_time) * geom.translation + pathSegment.rand_time * geom.translation_end, geom.translation, geom.translation_end); geom.transform = buildTransformationMatrix(blur_pos, geom.rotation, geom.scale); geom.inverseTransform = glm::inverse(geom.transform); geom.invTranspose = glm::inverseTranspose(geom.transform); #endif if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == MESH) { t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, vertices); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].outside = outside; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! 
you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } __global__ void shadeRealMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , glm::vec3 * image , int *flag_array ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; PathSegment &this_Path = pathSegments[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (this_Path.remainingBounces) { flag_array[idx] = 1; scatterRay(this_Path, intersection.t * this_Path.ray.direction + this_Path.ray.origin, intersection.surfaceNormal, material, rng, intersection.outside); } else { flag_array[idx] = 0; if(this_Path.is_terminated) image[this_Path.pixelIndex] += this_Path.color; } } else { flag_array[idx] = 0; } } } __global__ void shadeRealMaterial_Direct( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , glm::vec3 * image , int *flag_array , int *lights_indices , int num_lights , Geom * geoms , int geoms_size , Vertex *vertices ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; PathSegment &this_Path = pathSegments[idx]; thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); //Direct Light if (this_Path.remainingBounces == 1) { int lights_hits = 0; glm::vec3 color; for (int i = 0; i < num_lights; i++) { int light_id = lights_indices[i]; Geom &geom_light = geoms[light_id]; this_Path.ray.direction = glm::normalize(geom_light.translation - this_Path.ray.origin); float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; bool outside = true; //compute intersection and t for (int j = 0; j < geoms_size; j++) { Geom & geom = geoms[j]; #if MotionBlur_Toggle glm::vec3 blur_pos = glm::clamp((1 - pathSegment.rand_time) * geom.translation + pathSegment.rand_time * geom.translation_end, geom.translation, geom.translation_end); geom.transform = buildTransformationMatrix(blur_pos, geom.rotation, geom.scale); geom.inverseTransform = glm::inverse(geom.transform); geom.invTranspose = glm::inverseTranspose(geom.transform); #endif if (geom.type == CUBE) { t = boxIntersectionTest(geom, this_Path.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, this_Path.ray, tmp_intersect, 
tmp_normal, outside); } else if (geom.type == MESH) { t = meshIntersectionTest(geom, this_Path.ray, tmp_intersect, tmp_normal, outside, vertices); } // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = j; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == lights_indices[i]) { lights_hits++; Material m = materials[geoms[hit_geom_index].materialid]; color += this_Path.color * (m.color * m.emittance); } } if (lights_hits) { color = color / float(lights_hits); image[this_Path.pixelIndex] += color; } flag_array[idx] = 0; this_Path.remainingBounces--; } if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (this_Path.remainingBounces) { flag_array[idx] = 1; scatterRay(this_Path, intersection.t * this_Path.ray.direction + this_Path.ray.origin, intersection.surfaceNormal, material, rng, intersection.outside); } else { flag_array[idx] = 0; if (this_Path.is_terminated) image[this_Path.pixelIndex] += this_Path.color; } } else { flag_array[idx] = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } __global__ void kernScatterPaths(int n, PathSegment *odata, const PathSegment *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) return; if (bools[index]) odata[indices[index]] = idata[index]; } int compact_Paths(int n) { // TODO if (n <= 0) return -1; int celllog = ilog2ceil(n); int pow2len = 1 << celllog; int *dev_indices; hipMalloc((void**)&dev_indices, pow2len * sizeof(int)); checkCUDAError("hipMalloc dev_indices failed!"); PathSegment *dev_temp_paths; hipMalloc((void**)&dev_temp_paths, pow2len * sizeof(PathSegment)); hipMemcpy(dev_temp_paths, dev_paths, n * sizeof(PathSegment), hipMemcpyDeviceToDevice); // Scan hipMemcpy(dev_indices, dev_flag_array, n * sizeof(int), hipMemcpyDeviceToDevice); checkCUDAError("hipMemcpy failed!"); int blockSize = 128; int blockNum; //Up-Sweep for (int d = 0; d <= celllog - 1; d++) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; StreamCompaction::Efficient::cudaSweepUp << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_indices); } //Down-Sweep hipMemset(dev_indices + pow2len - 1, 0, sizeof(int)); checkCUDAError("hipMemset failed!"); for (int d = celllog - 1; d >= 0; d--) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; StreamCompaction::Efficient::cudaSweepDown << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_indices); } //Scattered blockNum = (n + blockSize) / blockSize; kernScatterPaths << <blockNum, blockSize >> >(n, dev_paths, dev_temp_paths, dev_flag_array, dev_indices); //compute count int a, b; hipMemcpy(&a, dev_flag_array + (n - 1), sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&b, dev_indices + (n - 1), sizeof(int), hipMemcpyDeviceToHost); int count = a + b; //Free data hipFree(dev_indices); checkCUDAError("hipFree dev_idata failed!"); hipFree(dev_temp_paths); checkCUDAError("hipFree 
dev_temp_paths failed!"); return count; } void compressedPathandIntersection(int& num_paths, PathSegment *paths, int *flag) { thrust::device_ptr<int> dev_ptrFlag(flag); thrust::device_ptr<PathSegment> dev_ptrPaths(paths); thrust::remove_if(dev_ptrPaths, dev_ptrPaths + num_paths, dev_ptrFlag, thrust::logical_not<int>()); num_paths = thrust::count_if(dev_ptrFlag, dev_ptrFlag + num_paths, thrust::identity<int>()); } // Sort by materialId typedef thrust::tuple<PathSegment, ShadeableIntersection> Tuple; class cmp { public: __host__ __device__ bool operator()(const Tuple &a, const Tuple &b) { return a.get<1>().materialId < b.get<1>().materialId; } }; void sortByMaterialId(int num_paths, PathSegment *dev_paths, ShadeableIntersection *dev_intersections) { thrust::device_ptr<PathSegment> ptrPath(dev_paths); thrust::device_ptr<ShadeableIntersection> ptrIntersection(dev_intersections); typedef thrust::tuple<thrust::device_ptr<PathSegment>, thrust::device_ptr<ShadeableIntersection>> IteratorTuple; typedef thrust::zip_iterator<IteratorTuple> ZipIterator; ZipIterator zip_begin = thrust::make_zip_iterator(thrust::make_tuple(ptrPath, ptrIntersection)); ZipIterator zip_end = zip_begin + num_paths; thrust::sort(zip_begin, zip_end, cmp()); } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing // -------------- Caching -------------- int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; bool outside = true; StreamCompaction::Efficient::timer().startGpuTimer(); #if Caching_Toggle && !AntiAliasing_Toggle && !MotionBlur_Toggle && !Depth_Of_Field_Toggle if (iter == 1) { generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); hipMemcpy(dev_cache_paths, dev_paths, num_paths * sizeof(PathSegment), hipMemcpyDeviceToDevice); checkCUDAError("Memcpy dev_paths to dev_cache_paths"); // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); hipMemcpy(dev_cache_intersections, dev_intersections, num_paths * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); checkCUDAError("Memcpy dev_intersections to dev_cache_intersections"); } #endif // --------------- PathSegment Tracing Stage ----------------- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; bool firstStage = true; while (!iterationComplete) { dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); if (firstStage) { firstStage = false; #if Caching_Toggle && !AntiAliasing_Toggle && !MotionBlur_Toggle && !Depth_Of_Field_Toggle hipMemcpy(dev_paths, dev_cache_paths, num_paths * sizeof(PathSegment), hipMemcpyDeviceToDevice); hipMemcpy(dev_intersections, dev_cache_intersections, num_paths * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); #else generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); #endif } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); hipDeviceSynchronize(); } depth++; // TODO: // ---------------- Shading Stage ------------------ // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
//The last //int num_materials = 5; //StreamCompaction::Radix::RadixSort_Path_Interactions(num_paths, dev_paths, dev_intersections, num_materials); #if Sorting_Toggle sortByMaterialId(num_paths, dev_paths, dev_intersections); #endif #if Direct_Light_Toggle shadeRealMaterial_Direct << <numblocksPathSegmentTracing, blockSize1d >> >( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_image, dev_flag_array, dev_lights_indices, hst_scene->lights_indices.size(), dev_geoms, hst_scene->geoms.size(), dev_vertices ); #else shadeRealMaterial << <numblocksPathSegmentTracing, blockSize1d >> >( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_image, dev_flag_array ); #endif //get new paths and new flag_array // ----------------- Stream Compaction ---------------- num_paths = compact_Paths(num_paths); //compressedPathandIntersection(num_paths, dev_paths, dev_flag_array); //get new path pool and num_paths if(num_paths <= 0) iterationComplete = true; // TODO: should be based off stream compaction results. } // Assemble this iteration and apply it to the image /*dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);*/ /////////////////////////////////////////////////////////////////////////// StreamCompaction::Efficient::timer().endGpuTimer(); //std::cout << " elapsed time: " << StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation() << " ms\n"; fprintf(fp, "%lf\n", StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation()); // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
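// --- Illustrative sketch (not part of the original file) ---------------------
// compact_Paths above runs a Blelloch exclusive scan (cudaSweepUp/cudaSweepDown)
// over the 0/1 flag array and scatters surviving paths to their scanned indices;
// the survivor count is flag[n-1] + indices[n-1]. The same exclusive scan on the
// host, shown only as a reference for what the two sweeps compute (hypothetical
// CPU-only helper):
static void exclusiveScanHost(const int* flags, int* indices, int n) {
    int running = 0;
    for (int i = 0; i < n; ++i) {
        indices[i] = running;   // number of surviving paths strictly before i
        running += flags[i];
    }
    // 'running' now equals flags[n-1] + indices[n-1], the count returned above
}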
98c9ee512158d0e7621ca5d29b2b9b2472504c3e.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/matrix_inverse.hpp> #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #include "efficient.h" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "radix.h" #include <fstream> //Toggles Defines #include "Setting_defines.h" //Results file extern FILE *fp; #define ERRORCHECK 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static ShadeableIntersection * dev_intersections = NULL; // TODO: static variables for device memory, any extra info you need, etc static PathSegment * dev_cache_paths = NULL; static ShadeableIntersection * dev_cache_intersections = NULL; static int * dev_flag_array = NULL; static Vertex * dev_vertices = NULL; static int * dev_lights_indices = NULL; // ... 
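// --- Illustrative sketch (not part of the original file) ---------------------
// makeSeededRandomEngine above hashes (iteration, index, depth) into one seed so
// every pixel, bounce and iteration draws from a decorrelated random stream.
// Minimal usage, wrapped in a hypothetical helper name:
__device__ inline float randomUnitSample(int iter, int index, int depth) {
    thrust::default_random_engine rng = makeSeededRandomEngine(iter, index, depth);
    thrust::uniform_real_distribution<float> u01(0.f, 1.f);
    return u01(rng);   // one uniform sample in [0, 1)
}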
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // TODO: initialize any extra device memeory you need //Cache cudaMalloc(&dev_cache_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_cache_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_flag_array, pixelcount * sizeof(int)); //Smooth Normals: //hst_scene->Smooth_Normals(); cudaMalloc(&dev_vertices, hst_scene->vertices.size() * sizeof(Vertex)); cudaMemcpy(dev_vertices, hst_scene->vertices.data(), hst_scene->vertices.size() * sizeof(Vertex), cudaMemcpyHostToDevice); cudaMalloc(&dev_lights_indices, hst_scene->lights_indices.size() * sizeof(int)); cudaMemcpy(dev_lights_indices, hst_scene->lights_indices.data(), hst_scene->lights_indices.size() * sizeof(int), cudaMemcpyHostToDevice); checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_materials); cudaFree(dev_intersections); // TODO: clean up any extra device memory you created cudaFree(dev_cache_paths); cudaFree(dev_cache_intersections); cudaFree(dev_flag_array); cudaFree(dev_lights_indices); checkCUDAError("pathtraceFree"); } //Reference: PBRT source code https://www.dartdocs.org/documentation/dartray/0.0.1/core/ConcentricSampleDisk.html //ConcentricSampleDisk __device__ glm::vec2 ConcentricSampleDisk(float u1, float u2) { float r, theta; float a, b; // Map uniform random numbers to $[-1,1]^2$ float sx = 2 * u1 - 1; float sy = 2 * u2 - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { // Handle first region of disk r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { // Handle second region of disk r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { // Handle third region of disk r = -sx; theta = 4.0f - sy / r; } else { // Handle fourth region of disk r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; a = r * cosf(theta); b = r * sinf(theta); glm::vec2 returnValue(a, b); return returnValue; } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); segment.is_terminated = false; thrust::default_random_engine rng = makeSeededRandomEngine(iter, x + y, 0); thrust::uniform_real_distribution<float> u01(0, 1); // TODO: implement antialiasing by jittering the ray //Stochastic Sampling #if AntiAliasing_Toggle segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)(x + u01(rng)) - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)(y + u01(rng)) - (float)cam.resolution.y * 0.5f) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif //Depth of Field #if Depth_Of_Field_Toggle float u1 = u01(rng), u2 = u01(rng); glm::vec2 pLens = cam.lensRadius * ConcentricSampleDisk(u1, u2); glm::vec3 pFocus = segment.ray.origin + glm::abs(cam.focalLength / segment.ray.direction.z) * segment.ray.direction; segment.ray.origin += pLens.x * cam.right + pLens.y * cam.up; segment.ray.direction = glm::normalize(pFocus - segment.ray.origin); #endif segment.rand_time = u01(rng); segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __host__ __device__ glm::mat4 buildTransformationMatrix(glm::vec3 translation, glm::vec3 rotation, glm::vec3 scale) { glm::mat4 translationMat = glm::translate(glm::mat4(), translation); glm::mat4 rotationMat = glm::rotate(glm::mat4(), rotation.x * (float)PI / 180, glm::vec3(1, 0, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.y * (float)PI / 180, glm::vec3(0, 1, 0)); rotationMat = rotationMat * glm::rotate(glm::mat4(), rotation.z * (float)PI / 180, glm::vec3(0, 0, 1)); glm::mat4 scaleMat = glm::scale(glm::mat4(), scale); return translationMat * rotationMat * scaleMat; } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
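// --- Illustrative sketch (not part of the original file) ---------------------
// The Depth_Of_Field branch above is a thin-lens model: sample a point on the
// lens with ConcentricSampleDisk, find where the original ray meets the focal
// plane, then re-aim the ray from the lens sample through that point. The same
// steps isolated into a hypothetical helper:
__device__ inline void thinLensPerturb(glm::vec3& origin, glm::vec3& direction,
                                       const glm::vec3& right, const glm::vec3& up,
                                       float lensRadius, float focalLength,
                                       float u1, float u2) {
    glm::vec2 pLens  = lensRadius * ConcentricSampleDisk(u1, u2);                 // point on lens
    glm::vec3 pFocus = origin + glm::abs(focalLength / direction.z) * direction;  // focal-plane point
    origin   += pLens.x * right + pLens.y * up;                                   // move origin onto lens
    direction = glm::normalize(pFocus - origin);                                  // aim through focus
}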
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections , Vertex *vertices ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; bool outside = true; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; #if MotionBlur_Toggle glm::vec3 blur_pos = glm::clamp((1 - pathSegment.rand_time) * geom.translation + pathSegment.rand_time * geom.translation_end, geom.translation, geom.translation_end); geom.transform = buildTransformationMatrix(blur_pos, geom.rotation, geom.scale); geom.inverseTransform = glm::inverse(geom.transform); geom.invTranspose = glm::inverseTranspose(geom.transform); #endif if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == MESH) { t = meshIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, vertices); } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; intersections[path_index].outside = outside; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! 
you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } __global__ void shadeRealMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , glm::vec3 * image , int *flag_array ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; PathSegment &this_Path = pathSegments[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (this_Path.remainingBounces) { flag_array[idx] = 1; scatterRay(this_Path, intersection.t * this_Path.ray.direction + this_Path.ray.origin, intersection.surfaceNormal, material, rng, intersection.outside); } else { flag_array[idx] = 0; if(this_Path.is_terminated) image[this_Path.pixelIndex] += this_Path.color; } } else { flag_array[idx] = 0; } } } __global__ void shadeRealMaterial_Direct( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials , glm::vec3 * image , int *flag_array , int *lights_indices , int num_lights , Geom * geoms , int geoms_size , Vertex *vertices ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; PathSegment &this_Path = pathSegments[idx]; thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); //Direct Light if (this_Path.remainingBounces == 1) { int lights_hits = 0; glm::vec3 color; for (int i = 0; i < num_lights; i++) { int light_id = lights_indices[i]; Geom &geom_light = geoms[light_id]; this_Path.ray.direction = glm::normalize(geom_light.translation - this_Path.ray.origin); float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; bool outside = true; //compute intersection and t for (int j = 0; j < geoms_size; j++) { Geom & geom = geoms[j]; #if MotionBlur_Toggle glm::vec3 blur_pos = glm::clamp((1 - pathSegment.rand_time) * geom.translation + pathSegment.rand_time * geom.translation_end, geom.translation, geom.translation_end); geom.transform = buildTransformationMatrix(blur_pos, geom.rotation, geom.scale); geom.inverseTransform = glm::inverse(geom.transform); geom.invTranspose = glm::inverseTranspose(geom.transform); #endif if (geom.type == CUBE) { t = boxIntersectionTest(geom, this_Path.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, this_Path.ray, tmp_intersect, 
tmp_normal, outside); } else if (geom.type == MESH) { t = meshIntersectionTest(geom, this_Path.ray, tmp_intersect, tmp_normal, outside, vertices); } // scene geometry object was hit first. if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = j; intersect_point = tmp_intersect; normal = tmp_normal; } } if (hit_geom_index == lights_indices[i]) { lights_hits++; Material m = materials[geoms[hit_geom_index].materialid]; color += this_Path.color * (m.color * m.emittance); } } if (lights_hits) { color = color / float(lights_hits); image[this_Path.pixelIndex] += color; } flag_array[idx] = 0; this_Path.remainingBounces--; } if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (this_Path.remainingBounces) { flag_array[idx] = 1; scatterRay(this_Path, intersection.t * this_Path.ray.direction + this_Path.ray.origin, intersection.surfaceNormal, material, rng, intersection.outside); } else { flag_array[idx] = 0; if (this_Path.is_terminated) image[this_Path.pixelIndex] += this_Path.color; } } else { flag_array[idx] = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } __global__ void kernScatterPaths(int n, PathSegment *odata, const PathSegment *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) return; if (bools[index]) odata[indices[index]] = idata[index]; } int compact_Paths(int n) { // TODO if (n <= 0) return -1; int celllog = ilog2ceil(n); int pow2len = 1 << celllog; int *dev_indices; cudaMalloc((void**)&dev_indices, pow2len * sizeof(int)); checkCUDAError("cudaMalloc dev_indices failed!"); PathSegment *dev_temp_paths; cudaMalloc((void**)&dev_temp_paths, pow2len * sizeof(PathSegment)); cudaMemcpy(dev_temp_paths, dev_paths, n * sizeof(PathSegment), cudaMemcpyDeviceToDevice); // Scan cudaMemcpy(dev_indices, dev_flag_array, n * sizeof(int), cudaMemcpyDeviceToDevice); checkCUDAError("cudaMemcpy failed!"); int blockSize = 128; int blockNum; //Up-Sweep for (int d = 0; d <= celllog - 1; d++) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; StreamCompaction::Efficient::cudaSweepUp << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_indices); } //Down-Sweep cudaMemset(dev_indices + pow2len - 1, 0, sizeof(int)); checkCUDAError("cudaMemset failed!"); for (int d = celllog - 1; d >= 0; d--) { int interval_length = (1 << (d + 1)); blockNum = (pow2len / interval_length + blockSize) / blockSize; StreamCompaction::Efficient::cudaSweepDown << <blockNum, blockSize >> >(pow2len / interval_length, d, dev_indices); } //Scattered blockNum = (n + blockSize) / blockSize; kernScatterPaths << <blockNum, blockSize >> >(n, dev_paths, dev_temp_paths, dev_flag_array, dev_indices); //compute count int a, b; cudaMemcpy(&a, dev_flag_array + (n - 1), sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&b, dev_indices + (n - 1), sizeof(int), cudaMemcpyDeviceToHost); int count = a + b; //Free data cudaFree(dev_indices); checkCUDAError("cudaFree dev_idata failed!"); cudaFree(dev_temp_paths); 
checkCUDAError("cudaFree dev_temp_paths failed!"); return count; } void compressedPathandIntersection(int& num_paths, PathSegment *paths, int *flag) { thrust::device_ptr<int> dev_ptrFlag(flag); thrust::device_ptr<PathSegment> dev_ptrPaths(paths); thrust::remove_if(dev_ptrPaths, dev_ptrPaths + num_paths, dev_ptrFlag, thrust::logical_not<int>()); num_paths = thrust::count_if(dev_ptrFlag, dev_ptrFlag + num_paths, thrust::identity<int>()); } // Sort by materialId typedef thrust::tuple<PathSegment, ShadeableIntersection> Tuple; class cmp { public: __host__ __device__ bool operator()(const Tuple &a, const Tuple &b) { return a.get<1>().materialId < b.get<1>().materialId; } }; void sortByMaterialId(int num_paths, PathSegment *dev_paths, ShadeableIntersection *dev_intersections) { thrust::device_ptr<PathSegment> ptrPath(dev_paths); thrust::device_ptr<ShadeableIntersection> ptrIntersection(dev_intersections); typedef thrust::tuple<thrust::device_ptr<PathSegment>, thrust::device_ptr<ShadeableIntersection>> IteratorTuple; typedef thrust::zip_iterator<IteratorTuple> ZipIterator; ZipIterator zip_begin = thrust::make_zip_iterator(thrust::make_tuple(ptrPath, ptrIntersection)); ZipIterator zip_end = zip_begin + num_paths; thrust::sort(zip_begin, zip_end, cmp()); } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing // -------------- Caching -------------- int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; bool outside = true; StreamCompaction::Efficient::timer().startGpuTimer(); #if Caching_Toggle && !AntiAliasing_Toggle && !MotionBlur_Toggle && !Depth_Of_Field_Toggle if (iter == 1) { generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); cudaMemcpy(dev_cache_paths, dev_paths, num_paths * sizeof(PathSegment), cudaMemcpyDeviceToDevice); checkCUDAError("Memcpy dev_paths to dev_cache_paths"); // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); cudaMemcpy(dev_cache_intersections, dev_intersections, num_paths * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); checkCUDAError("Memcpy dev_intersections to dev_cache_intersections"); } #endif // --------------- PathSegment Tracing Stage ----------------- // Shoot ray into scene, bounce between objects, push shading chunks bool iterationComplete = false; bool firstStage = true; while (!iterationComplete) { dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); if (firstStage) { firstStage = false; #if Caching_Toggle && !AntiAliasing_Toggle && !MotionBlur_Toggle && !Depth_Of_Field_Toggle cudaMemcpy(dev_paths, dev_cache_paths, num_paths * sizeof(PathSegment), cudaMemcpyDeviceToDevice); cudaMemcpy(dev_intersections, dev_cache_intersections, num_paths * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); #else generateRayFromCamera << <blocksPerGrid2d, blockSize2d >> >(cam, iter, traceDepth, dev_paths); checkCUDAError("generate camera ray"); // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); #endif } else { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_vertices ); checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); } depth++; // TODO: // ---------------- Shading Stage ------------------ // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. 
//The last //int num_materials = 5; //StreamCompaction::Radix::RadixSort_Path_Interactions(num_paths, dev_paths, dev_intersections, num_materials); #if Sorting_Toggle sortByMaterialId(num_paths, dev_paths, dev_intersections); #endif #if Direct_Light_Toggle shadeRealMaterial_Direct << <numblocksPathSegmentTracing, blockSize1d >> >( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_image, dev_flag_array, dev_lights_indices, hst_scene->lights_indices.size(), dev_geoms, hst_scene->geoms.size(), dev_vertices ); #else shadeRealMaterial << <numblocksPathSegmentTracing, blockSize1d >> >( iter, num_paths, dev_intersections, dev_paths, dev_materials, dev_image, dev_flag_array ); #endif //get new paths and new flag_array // ----------------- Stream Compaction ---------------- num_paths = compact_Paths(num_paths); //compressedPathandIntersection(num_paths, dev_paths, dev_flag_array); //get new path pool and num_paths if(num_paths <= 0) iterationComplete = true; // TODO: should be based off stream compaction results. } // Assemble this iteration and apply it to the image /*dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather << <numBlocksPixels, blockSize1d >> >(num_paths, dev_image, dev_paths);*/ /////////////////////////////////////////////////////////////////////////// StreamCompaction::Efficient::timer().endGpuTimer(); //std::cout << " elapsed time: " << StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation() << " ms\n"; fprintf(fp, "%lf\n", StreamCompaction::Efficient::timer().getGpuElapsedTimeForPreviousOperation()); // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
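// --- Illustrative sketch (not part of the original file) ---------------------
// sortByMaterialId above sorts a zip of (PathSegment, ShadeableIntersection)
// with a comparator on materialId. An alternative, assuming a hypothetical
// scratch buffer dev_keys of at least num_paths ints, is to materialize the
// keys and let thrust::sort_by_key reorder both arrays through a zip iterator:
#include <thrust/sort.h>
#include <thrust/transform.h>
struct MaterialKeyOf {
    __host__ __device__ int operator()(const ShadeableIntersection& s) const {
        return s.materialId;                            // sort key = material id
    }
};
void sortByMaterialKey(int num_paths, PathSegment* paths,
                       ShadeableIntersection* isects, int* dev_keys) {
    thrust::device_ptr<PathSegment> p(paths);
    thrust::device_ptr<ShadeableIntersection> s(isects);
    thrust::device_ptr<int> k(dev_keys);
    thrust::transform(s, s + num_paths, k, MaterialKeyOf());            // fill keys
    thrust::sort_by_key(k, k + num_paths,
        thrust::make_zip_iterator(thrust::make_tuple(p, s)));           // reorder pairs
}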
2378ffa9a65909fb20817658b5aede89fa5209a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_4_b; int xdim0_update_halo_kernel3_minus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_4_b; int ydim0_update_halo_kernel3_minus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_4_b; int xdim1_update_halo_kernel3_minus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_4_b; int ydim1_update_halo_kernel3_minus_4_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_minus_4_b*(y)+xdim0_update_halo_kernel3_minus_4_b*ydim0_update_halo_kernel3_minus_4_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_minus_4_b*(y)+xdim1_update_halo_kernel3_minus_4_b*ydim1_update_halo_kernel3_minus_4_b*(z)) //user function __device__ inline void update_halo_kernel3_minus_4_b_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = -(vol_flux_x[OPS_ACC0(-4,0,0)]); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = -(mass_flux_x[OPS_ACC1(-4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_4_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_minus_4_b + idx_z * 1*1 * xdim0_update_halo_kernel3_minus_4_b * ydim0_update_halo_kernel3_minus_4_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_minus_4_b + idx_z * 1*1 * xdim1_update_halo_kernel3_minus_4_b * ydim1_update_halo_kernel3_minus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_4_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_4_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,65)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(65,"update_halo_kernel3_minus_4_b"); OPS_kernels[65].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = 
MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_4_b_h || ydim0 != ydim0_update_halo_kernel3_minus_4_b_h || xdim1 != xdim1_update_halo_kernel3_minus_4_b_h || ydim1 != ydim1_update_halo_kernel3_minus_4_b_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel3_minus_4_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel3_minus_4_b_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel3_minus_4_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_minus_4_b_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel3_minus_4_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_minus_4_b_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel3_minus_4_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_minus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[65].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_4_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[65].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[65].mpi_time += t2-t1; OPS_kernels[65].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[65].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor 
*)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 65; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 65; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_4_b_execute; if (OPS_diags > 1) { ops_timing_realloc(65,"update_halo_kernel3_minus_4_b"); } ops_enqueue_kernel(desc); } #endif
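// --- Illustrative sketch (not part of the original file) ---------------------
// OPS_ACC0/OPS_ACC1 above flatten a relative (x,y,z) offset into a 1D index of
// an x-fastest array of extents xdim*ydim*zdim, so OPS_ACC0(-4,0,0) reads the
// value four cells to the left of the current one. A standalone equivalent with
// hypothetical parameter names:
static inline int flattenIndex3D(int x, int y, int z, int xdim, int ydim) {
    return x + xdim * y + xdim * ydim * z;   // same layout as the OPS_ACC macros
}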
2378ffa9a65909fb20817658b5aede89fa5209a7.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_4_b; int xdim0_update_halo_kernel3_minus_4_b_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_4_b; int ydim0_update_halo_kernel3_minus_4_b_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_4_b; int xdim1_update_halo_kernel3_minus_4_b_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_4_b; int ydim1_update_halo_kernel3_minus_4_b_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel3_minus_4_b*(y)+xdim0_update_halo_kernel3_minus_4_b*ydim0_update_halo_kernel3_minus_4_b*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel3_minus_4_b*(y)+xdim1_update_halo_kernel3_minus_4_b*ydim1_update_halo_kernel3_minus_4_b*(z)) //user function __device__ inline void update_halo_kernel3_minus_4_b_gpu(double *vol_flux_x, double *mass_flux_x, const int* fields) { if(fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0,0,0)] = -(vol_flux_x[OPS_ACC0(-4,0,0)]); if(fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0,0,0)] = -(mass_flux_x[OPS_ACC1(-4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_4_b( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel3_minus_4_b + idx_z * 1*1 * xdim0_update_halo_kernel3_minus_4_b * ydim0_update_halo_kernel3_minus_4_b; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel3_minus_4_b + idx_z * 1*1 * xdim1_update_halo_kernel3_minus_4_b * ydim1_update_halo_kernel3_minus_4_b; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_4_b_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_4_b_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,65)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(65,"update_halo_kernel3_minus_4_b"); OPS_kernels[65].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int 
ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_4_b_h || ydim0 != ydim0_update_halo_kernel3_minus_4_b_h || xdim1 != xdim1_update_halo_kernel3_minus_4_b_h || ydim1 != ydim1_update_halo_kernel3_minus_4_b_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel3_minus_4_b, &xdim0, sizeof(int) ); xdim0_update_halo_kernel3_minus_4_b_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel3_minus_4_b, &ydim0, sizeof(int) ); ydim0_update_halo_kernel3_minus_4_b_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel3_minus_4_b, &xdim1, sizeof(int) ); xdim1_update_halo_kernel3_minus_4_b_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel3_minus_4_b, &ydim1, sizeof(int) ); ydim1_update_halo_kernel3_minus_4_b_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[65].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel3_minus_4_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[65].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[65].mpi_time += t2-t1; OPS_kernels[65].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[65].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_4_b(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 65; desc->hash = 5381; 
desc->hash = ((desc->hash << 5) + desc->hash) + 65; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_4_b_execute; if (OPS_diags > 1) { ops_timing_realloc(65,"update_halo_kernel3_minus_4_b"); } ops_enqueue_kernel(desc); } #endif
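/*
 * The two generated files above expose the host stub
 *   ops_par_loop_update_halo_kernel3_minus_4_b(name, block, dim, range, arg0, arg1, arg2)
 * but no call site is included in this collection. The fragment below only illustrates how
 * such a stub is typically driven from OPS application code; the block, dat and stencil names
 * (grid3d, vol_flux_x, mass_flux_x, S3D_HALO) and the range bounds are hypothetical
 * placeholders, and the stencil is assumed to cover the (-4,0,0) read made by the user kernel.
 */
// Placeholder iteration range supplied by the application (xmin..zmax are not defined here):
int halo_range[6] = { xmin, xmax, ymin, ymax, zmin, zmax };

ops_par_loop_update_halo_kernel3_minus_4_b("update_halo_kernel3_minus_4_b", grid3d, 3, halo_range,
    ops_arg_dat(vol_flux_x,  1, S3D_HALO, "double", OPS_RW),   // arg0: written at (0,0,0), read at (-4,0,0)
    ops_arg_dat(mass_flux_x, 1, S3D_HALO, "double", OPS_RW),   // arg1: same access pattern
    ops_arg_gbl(fields,      NUM_FIELDS, "int",    OPS_READ)); // arg2: per-field enable flags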
7b48effcc6d01e0ceb7ac2d1c47deb01838a8fad.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include <hipfft.h> /* hipfftPlanMany function * - 2D FFT transform using 1D cuffts without transpositions * - compare with transposition version */ #define NX 512 #define BATCH NX #define TILE_DIM 16 using namespace std; __global__ void transposeNoBankConflicts(hipfftDoubleComplex *idata, hipfftDoubleComplex *odata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; tile[threadIdx.y][threadIdx.x] = idata[index_in].x; __syncthreads(); odata[index_out].x = tile[threadIdx.x][threadIdx.y]; // (*) transpose also the complex part of the matrix __syncthreads(); tile[threadIdx.y][threadIdx.x] = idata[index_in].y; __syncthreads(); odata[index_out].y = tile[threadIdx.x][threadIdx.y]; } int main(int argc, char *argv[]) { struct timeval tt1, tt2; int ms; float fms; // create cufft plan hipfftHandle plan; hipfftPlan1d(&plan, NX, HIPFFT_Z2Z, BATCH); // allocate hipfftDoubleComplex type host memory hipfftDoubleComplex *data; data = (hipfftDoubleComplex*)malloc(NX*BATCH * sizeof(hipfftDoubleComplex)); // data initialization for(int j=0 ; j < BATCH ; j++) for(int k=0 ; k < NX ; k++) { data[k + j*NX].x = sin(double(j)+double(k)); data[k + j*NX].y = cos(double(j)+double(k)); } // check initial value of a data element cout << "initial value = " << data[43].x << " + " << data[43].y << "i" << endl; // allocate hipfftDoubleComplex type device memory hipfftDoubleComplex *devPtr; hipMalloc((void**)&devPtr, sizeof(hipfftDoubleComplex)*NX*BATCH*2); // copy data to device memory hipMemcpy(devPtr, data, sizeof(hipfftDoubleComplex)*NX*BATCH, hipMemcpyHostToDevice); // create plans hipfftHandle plan1; hipfftHandle plan2; int inembed[1]; int onembed[1]; inembed[0] = 1; onembed[0] = 1; // (*) define FFT dimension array int n1d[3]= // (*) planning for X dimension hipfftPlanMany(&plan1, 1, n1d, ***, ***, ***, ***, ***, ***, HIPFFT_Z2Z, ***); // (*) planning for Y dimension hipfftPlanMany(&plan2, 1, n1d, ***, ***, ***, ***, ***, ***, HIPFFT_Z2Z, ***); hipDeviceSynchronize(); gettimeofday( &tt1, NULL ); // X transform hipfftExecZ2Z(plan1, devPtr, devPtr + NX*NX, HIPFFT_FORWARD); //Y transform hipfftExecZ2Z(plan2, devPtr + NX*NX, devPtr, HIPFFT_FORWARD); hipDeviceSynchronize(); gettimeofday( &tt2, NULL ); // timing ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.f; cout << "Computation time = " << fms << " seconds" << endl; // runtime configuration parameters for transposition dim3 grid(NX/TILE_DIM,NX/TILE_DIM,1); dim3 threads(TILE_DIM,TILE_DIM,1); // (*) make inverse transform for (int tt=0; tt<2; tt++) { hipfftExecZ2Z(plan, devPtr, devPtr + NX*NX, HIPFFT_BACKWARD); hipLaunchKernelGGL(( transposeNoBankConflicts) , dim3(grid), dim3(threads) , 0, 0, devPtr + NX*NX, devPtr, NX, NX); } hipDeviceSynchronize(); gettimeofday( &tt1, NULL ); // transfer result back from device hipMemcpy(data, devPtr, sizeof(hipfftDoubleComplex)*NX*BATCH, hipMemcpyDeviceToHost); // (*) destroy cufft plan hipfftDestroy(plan); // free device memory hipFree(devPtr); // check initial value of the same data element. 
Initial and final values should match // after a forward and inverse transform. cout << "final value = " << data[43].x/double(NX*NX) << " + " << data[43].y/double(NX*NX) << "i" << endl; // free host memory free(data); // timing ms = (tt1.tv_sec - tt2.tv_sec); ms = ms * 1000000 + (tt1.tv_usec - tt2.tv_usec); fms = ((double)ms)/1000000.f; cout << "Computation time = " << fms << " seconds" << endl; }
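/*
 * A note on the transpose used in both versions of this exercise file: transposeNoBankConflicts
 * stages the .x and .y components of double-precision FFT data through a __shared__ float tile,
 * so every transposed element is silently narrowed to single precision. The kernel below is a
 * minimal double-precision variant, written against double2 (layout-compatible with
 * cufftDoubleComplex / hipfftDoubleComplex); it is a suggested alternative, not the kernel
 * shipped in the file above.
 */
#define TILE_DIM 16

// Padded shared-memory transpose that keeps the complex data in double precision.
__global__ void transposeComplexDouble(const double2 *idata, double2 *odata,
                                       int width, int height)
{
    __shared__ double2 tile[TILE_DIM][TILE_DIM + 1];  // extra column reduces bank conflicts

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    tile[threadIdx.y][threadIdx.x] = idata[x + y * width];   // coalesced read

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;                 // swapped block indices
    y = blockIdx.x * TILE_DIM + threadIdx.y;
    odata[x + y * height] = tile[threadIdx.x][threadIdx.y];  // coalesced write, transposed
}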
7b48effcc6d01e0ceb7ac2d1c47deb01838a8fad.cu
#include <iostream> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <sys/time.h> #include <cufft.h> /* cufftPlanMany function * - 2D FFT transform using 1D cuffts without transpositions * - compare with transposition version */ #define NX 512 #define BATCH NX #define TILE_DIM 16 using namespace std; __global__ void transposeNoBankConflicts(cufftDoubleComplex *idata, cufftDoubleComplex *odata, int width, int height) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int xIndex = blockIdx.x * TILE_DIM + threadIdx.x; int yIndex = blockIdx.y * TILE_DIM + threadIdx.y; int index_in = xIndex + (yIndex)*width; xIndex = blockIdx.y * TILE_DIM + threadIdx.x; yIndex = blockIdx.x * TILE_DIM + threadIdx.y; int index_out = xIndex + (yIndex)*height; tile[threadIdx.y][threadIdx.x] = idata[index_in].x; __syncthreads(); odata[index_out].x = tile[threadIdx.x][threadIdx.y]; // (*) transpose also the complex part of the matrix __syncthreads(); tile[threadIdx.y][threadIdx.x] = idata[index_in].y; __syncthreads(); odata[index_out].y = tile[threadIdx.x][threadIdx.y]; } int main(int argc, char *argv[]) { struct timeval tt1, tt2; int ms; float fms; // create cufft plan cufftHandle plan; cufftPlan1d(&plan, NX, CUFFT_Z2Z, BATCH); // allocate cufftDoubleComplex type host memory cufftDoubleComplex *data; data = (cufftDoubleComplex*)malloc(NX*BATCH * sizeof(cufftDoubleComplex)); // data initialization for(int j=0 ; j < BATCH ; j++) for(int k=0 ; k < NX ; k++) { data[k + j*NX].x = sin(double(j)+double(k)); data[k + j*NX].y = cos(double(j)+double(k)); } // check initial value of a data element cout << "initial value = " << data[43].x << " + " << data[43].y << "i" << endl; // allocate cufftDoubleComplex type device memory cufftDoubleComplex *devPtr; cudaMalloc((void**)&devPtr, sizeof(cufftDoubleComplex)*NX*BATCH*2); // copy data to device memory cudaMemcpy(devPtr, data, sizeof(cufftDoubleComplex)*NX*BATCH, cudaMemcpyHostToDevice); // create plans cufftHandle plan1; cufftHandle plan2; int inembed[1]; int onembed[1]; inembed[0] = 1; onembed[0] = 1; // (*) define FFT dimension array int n1d[3]= // (*) planning for X dimension cufftPlanMany(&plan1, 1, n1d, ***, ***, ***, ***, ***, ***, CUFFT_Z2Z, ***); // (*) planning for Y dimension cufftPlanMany(&plan2, 1, n1d, ***, ***, ***, ***, ***, ***, CUFFT_Z2Z, ***); cudaThreadSynchronize(); gettimeofday( &tt1, NULL ); // X transform cufftExecZ2Z(plan1, devPtr, devPtr + NX*NX, CUFFT_FORWARD); //Y transform cufftExecZ2Z(plan2, devPtr + NX*NX, devPtr, CUFFT_FORWARD); cudaThreadSynchronize(); gettimeofday( &tt2, NULL ); // timing ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.f; cout << "Computation time = " << fms << " seconds" << endl; // runtime configuration parameters for transposition dim3 grid(NX/TILE_DIM,NX/TILE_DIM,1); dim3 threads(TILE_DIM,TILE_DIM,1); // (*) make inverse transform for (int tt=0; tt<2; tt++) { cufftExecZ2Z(plan, devPtr, devPtr + NX*NX, CUFFT_INVERSE); transposeNoBankConflicts <<< grid, threads >>>(devPtr + NX*NX, devPtr, NX, NX); } cudaThreadSynchronize(); gettimeofday( &tt1, NULL ); // transfer result back from device cudaMemcpy(data, devPtr, sizeof(cufftDoubleComplex)*NX*BATCH, cudaMemcpyDeviceToHost); // (*) destroy cufft plan cufftDestroy(plan); // free device memory cudaFree(devPtr); // check initial value of the same data element. Initial and final values should match // after a forward and inverse transform. 
cout << "final value = " << data[43].x/double(NX*NX) << " + " << data[43].y/double(NX*NX) << "i" << endl; // free host memory free(data); // timing ms = (tt1.tv_sec - tt2.tv_sec); ms = ms * 1000000 + (tt1.tv_usec - tt2.tv_usec); fms = ((double)ms)/1000000.f; cout << "Computation time = " << fms << " seconds" << endl; }
43f8d03c004ed74af9b918451efd25ab9f27db4c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <hip/hip_runtime.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM #include "kmeans_hip_kernel.hip" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char** argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double) num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim*num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int*) malloc(npoints * sizeof(int)); for(int i=0;i<npoints;i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ hipMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float)); hipMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float)); /* invert the data array (kernel execution) */ hipLaunchKernelGGL(( invert_mapping), dim3(num_blocks),dim3(num_threads), 0, 0, feature_flipped_d,feature_d,npoints,nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ hipMalloc((void**) &membership_d, npoints*sizeof(int)); hipMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float)); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side hipMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int)); //hipMemcpy(block_delta_d, &delta_h, sizeof(int), hipMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate center points for the next iteration hipMalloc((void**) &block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); //hipMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), 
hipMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* -------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); hipFree(feature_d); hipFree(feature_flipped_d); hipFree(membership_d); hipFree(clusters_d); #ifdef BLOCK_CENTER_REDUCE hipFree(block_clusters_d); #endif #ifdef BLOCK_DELTA_REDUCE hipFree(block_deltas_d); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // int main( int argc, char** argv) { // make sure we're running on the big card hipSetDevice(1); // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i,j; /* counters */ hipSetDevice(1); /* copy membership (host to device) */ hipMemcpy(membership_d, membership_new, npoints*sizeof(int), hipMemcpyHostToDevice); /* copy clusters (host to device) */ hipMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), hipMemcpyHostToDevice); /* set up texture */ hipChannelFormatDesc chDesc0 = hipCreateChannelDesc<float>(); t_features.filterMode = hipFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if(hipBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind features array to texture!\n"); hipChannelFormatDesc chDesc1 = hipCreateChannelDesc<float>(); t_features_flipped.filterMode = hipFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if(hipBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind features_flipped array to texture!\n"); hipChannelFormatDesc chDesc2 = hipCreateChannelDesc<float>(); t_clusters.filterMode = hipFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if(hipBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != hipSuccess) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ hipMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,hipMemcpyHostToDevice); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */ dim3 grid( num_blocks_perdim, num_blocks_perdim ); dim3 threads( num_threads_perdim*num_threads_perdim ); /* execute the kernel */ hipLaunchKernelGGL(( kmeansPoint), dim3(grid), dim3(threads) , 0, 0, feature_d, nfeatures, npoints, nclusters, membership_d, clusters_d, block_clusters_d, block_deltas_d); hipDeviceSynchronize(); /* copy back membership (device to host) */ hipMemcpy(membership_new, membership_d, npoints*sizeof(int), hipMemcpyDeviceToHost); #ifdef BLOCK_CENTER_REDUCE /*** Copy back arrays of per block sums ***/ float * block_clusters_h = (float *) malloc( num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); hipMemcpy(block_clusters_h, block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float), hipMemcpyDeviceToHost); #endif #ifdef BLOCK_DELTA_REDUCE int * block_deltas_h = (int *) malloc( num_blocks_perdim * num_blocks_perdim * sizeof(int)); hipMemcpy(block_deltas_h, block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int), hipMemcpyDeviceToHost); #endif /* for each point, sum data points in each cluster and see if membership has changed: if so, increase delta and change old membership, and update new_centers; otherwise, update new_centers */ delta = 0; for (i = 0; i < npoints; i++) { int cluster_id = membership_new[i]; new_centers_len[cluster_id]++; if (membership_new[i] != membership[i]) { #ifdef CPU_DELTA_REDUCE delta++; #endif membership[i] = membership_new[i]; } #ifdef CPU_CENTER_REDUCE for (j = 0; j < nfeatures; j++) { new_centers[cluster_id][j] += feature[i][j]; } #endif } #ifdef BLOCK_DELTA_REDUCE /*** calculate global sums from per block sums for delta and the new centers ***/ //debug //printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim); for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { //printf("block %d delta is %d \n",i,block_deltas_h[i]); delta += block_deltas_h[i]; } #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] = 0.f; } } for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k]; } } } #ifdef CPU_CENTER_REDUCE //debug /*for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) { printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]); } } }*/ #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) new_centers[j][k]= block_new_centers[j*nfeatures + k]; } #endif #endif return delta; } /* ------------------- kmeansCuda() end ------------------------ */
43f8d03c004ed74af9b918451efd25ab9f27db4c.cu
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <assert.h> #include <omp.h> #include <cuda.h> #define THREADS_PER_DIM 16 #define BLOCKS_PER_DIM 16 #define THREADS_PER_BLOCK THREADS_PER_DIM*THREADS_PER_DIM #include "kmeans_cuda_kernel.cu" //#define BLOCK_DELTA_REDUCE //#define BLOCK_CENTER_REDUCE #define CPU_DELTA_REDUCE #define CPU_CENTER_REDUCE extern "C" int setup(int argc, char** argv); /* function prototype */ // GLOBAL!!!!! unsigned int num_threads_perdim = THREADS_PER_DIM; /* sqrt(256) -- see references for this choice */ unsigned int num_blocks_perdim = BLOCKS_PER_DIM; /* temporary */ unsigned int num_threads = num_threads_perdim*num_threads_perdim; /* number of threads */ unsigned int num_blocks = num_blocks_perdim*num_blocks_perdim; /* number of blocks */ /* _d denotes it resides on the device */ int *membership_new; /* newly assignment membership */ float *feature_d; /* inverted data array */ float *feature_flipped_d; /* original (not inverted) data array */ int *membership_d; /* membership on the device */ float *block_new_centers; /* sum of points in a cluster (per block) */ float *clusters_d; /* cluster centers on the device */ float *block_clusters_d; /* per block calculation of cluster centers */ int *block_deltas_d; /* per block calculation of deltas */ /* -------------- allocateMemory() ------------------- */ /* allocate device memory, calculate number of blocks and threads, and invert the data array */ extern "C" void allocateMemory(int npoints, int nfeatures, int nclusters, float **features) { num_blocks = npoints / num_threads; if (npoints % num_threads > 0) /* defeat truncation */ num_blocks++; num_blocks_perdim = sqrt((double) num_blocks); while (num_blocks_perdim * num_blocks_perdim < num_blocks) // defeat truncation (should run once) num_blocks_perdim++; num_blocks = num_blocks_perdim*num_blocks_perdim; /* allocate memory for memory_new[] and initialize to -1 (host) */ membership_new = (int*) malloc(npoints * sizeof(int)); for(int i=0;i<npoints;i++) { membership_new[i] = -1; } /* allocate memory for block_new_centers[] (host) */ block_new_centers = (float *) malloc(nclusters*nfeatures*sizeof(float)); /* allocate memory for feature_flipped_d[][], feature_d[][] (device) */ cudaMalloc((void**) &feature_flipped_d, npoints*nfeatures*sizeof(float)); cudaMemcpy(feature_flipped_d, features[0], npoints*nfeatures*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**) &feature_d, npoints*nfeatures*sizeof(float)); /* invert the data array (kernel execution) */ invert_mapping<<<num_blocks,num_threads>>>(feature_flipped_d,feature_d,npoints,nfeatures); /* allocate memory for membership_d[] and clusters_d[][] (device) */ cudaMalloc((void**) &membership_d, npoints*sizeof(int)); cudaMalloc((void**) &clusters_d, nclusters*nfeatures*sizeof(float)); #ifdef BLOCK_DELTA_REDUCE // allocate array to hold the per block deltas on the gpu side cudaMalloc((void**) &block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int)); //cudaMemcpy(block_delta_d, &delta_h, sizeof(int), cudaMemcpyHostToDevice); #endif #ifdef BLOCK_CENTER_REDUCE // allocate memory and copy to card cluster array in which to accumulate center points for the next iteration cudaMalloc((void**) &block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); //cudaMemcpy(new_clusters_d, new_centers[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice); #endif } /* -------------- allocateMemory() end ------------------- */ /* 
-------------- deallocateMemory() ------------------- */ /* free host and device memory */ extern "C" void deallocateMemory() { free(membership_new); free(block_new_centers); cudaFree(feature_d); cudaFree(feature_flipped_d); cudaFree(membership_d); cudaFree(clusters_d); #ifdef BLOCK_CENTER_REDUCE cudaFree(block_clusters_d); #endif #ifdef BLOCK_DELTA_REDUCE cudaFree(block_deltas_d); #endif } /* -------------- deallocateMemory() end ------------------- */ //////////////////////////////////////////////////////////////////////////////// // Program main // int main( int argc, char** argv) { // make sure we're running on the big card cudaSetDevice(1); // as done in the CUDA start/help document provided setup(argc, argv); } // // //////////////////////////////////////////////////////////////////////////////// /* ------------------- kmeansCuda() ------------------------ */ extern "C" int // delta -- had problems when return value was of float type kmeansCuda(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, /* number of attributes for each point */ int npoints, /* number of data points */ int nclusters, /* number of clusters */ int *membership, /* which cluster the point belongs to */ float **clusters, /* coordinates of cluster centers */ int *new_centers_len, /* number of elements in each cluster */ float **new_centers /* sum of elements in each cluster */ ) { int delta = 0; /* if point has moved */ int i,j; /* counters */ cudaSetDevice(1); /* copy membership (host to device) */ cudaMemcpy(membership_d, membership_new, npoints*sizeof(int), cudaMemcpyHostToDevice); /* copy clusters (host to device) */ cudaMemcpy(clusters_d, clusters[0], nclusters*nfeatures*sizeof(float), cudaMemcpyHostToDevice); /* set up texture */ cudaChannelFormatDesc chDesc0 = cudaCreateChannelDesc<float>(); t_features.filterMode = cudaFilterModePoint; t_features.normalized = false; t_features.channelDesc = chDesc0; if(cudaBindTexture(NULL, &t_features, feature_d, &chDesc0, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features array to texture!\n"); cudaChannelFormatDesc chDesc1 = cudaCreateChannelDesc<float>(); t_features_flipped.filterMode = cudaFilterModePoint; t_features_flipped.normalized = false; t_features_flipped.channelDesc = chDesc1; if(cudaBindTexture(NULL, &t_features_flipped, feature_flipped_d, &chDesc1, npoints*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind features_flipped array to texture!\n"); cudaChannelFormatDesc chDesc2 = cudaCreateChannelDesc<float>(); t_clusters.filterMode = cudaFilterModePoint; t_clusters.normalized = false; t_clusters.channelDesc = chDesc2; if(cudaBindTexture(NULL, &t_clusters, clusters_d, &chDesc2, nclusters*nfeatures*sizeof(float)) != CUDA_SUCCESS) printf("Couldn't bind clusters array to texture!\n"); /* copy clusters to constant memory */ cudaMemcpyToSymbol("c_clusters",clusters[0],nclusters*nfeatures*sizeof(float),0,cudaMemcpyHostToDevice); /* setup execution parameters. 
changed to 2d (source code on NVIDIA CUDA Programming Guide) */ dim3 grid( num_blocks_perdim, num_blocks_perdim ); dim3 threads( num_threads_perdim*num_threads_perdim ); /* execute the kernel */ kmeansPoint<<< grid, threads >>>( feature_d, nfeatures, npoints, nclusters, membership_d, clusters_d, block_clusters_d, block_deltas_d); cudaThreadSynchronize(); /* copy back membership (device to host) */ cudaMemcpy(membership_new, membership_d, npoints*sizeof(int), cudaMemcpyDeviceToHost); #ifdef BLOCK_CENTER_REDUCE /*** Copy back arrays of per block sums ***/ float * block_clusters_h = (float *) malloc( num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float)); cudaMemcpy(block_clusters_h, block_clusters_d, num_blocks_perdim * num_blocks_perdim * nclusters * nfeatures * sizeof(float), cudaMemcpyDeviceToHost); #endif #ifdef BLOCK_DELTA_REDUCE int * block_deltas_h = (int *) malloc( num_blocks_perdim * num_blocks_perdim * sizeof(int)); cudaMemcpy(block_deltas_h, block_deltas_d, num_blocks_perdim * num_blocks_perdim * sizeof(int), cudaMemcpyDeviceToHost); #endif /* for each point, sum data points in each cluster and see if membership has changed: if so, increase delta and change old membership, and update new_centers; otherwise, update new_centers */ delta = 0; for (i = 0; i < npoints; i++) { int cluster_id = membership_new[i]; new_centers_len[cluster_id]++; if (membership_new[i] != membership[i]) { #ifdef CPU_DELTA_REDUCE delta++; #endif membership[i] = membership_new[i]; } #ifdef CPU_CENTER_REDUCE for (j = 0; j < nfeatures; j++) { new_centers[cluster_id][j] += feature[i][j]; } #endif } #ifdef BLOCK_DELTA_REDUCE /*** calculate global sums from per block sums for delta and the new centers ***/ //debug //printf("\t \t reducing %d block sums to global sum \n",num_blocks_perdim * num_blocks_perdim); for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { //printf("block %d delta is %d \n",i,block_deltas_h[i]); delta += block_deltas_h[i]; } #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] = 0.f; } } for(i = 0; i < num_blocks_perdim * num_blocks_perdim; i++) { for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { block_new_centers[j*nfeatures + k] += block_clusters_h[i * nclusters*nfeatures + j * nfeatures + k]; } } } #ifdef CPU_CENTER_REDUCE //debug /*for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) { if(new_centers[j][k] > 1.001 * block_new_centers[j*nfeatures + k] || new_centers[j][k] < 0.999 * block_new_centers[j*nfeatures + k]) { printf("\t \t for %d:%d, normal value is %e and gpu reduced value id %e \n",j,k,new_centers[j][k],block_new_centers[j*nfeatures + k]); } } }*/ #endif #ifdef BLOCK_CENTER_REDUCE for(int j = 0; j < nclusters;j++) { for(int k = 0; k < nfeatures;k++) new_centers[j][k]= block_new_centers[j*nfeatures + k]; } #endif #endif return delta; } /* ------------------- kmeansCuda() end ------------------------ */
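/*
 * Both kmeans variants above depend on host APIs that are deprecated or removed in current
 * toolkits: cudaMemcpyToSymbol with a string symbol name ("c_clusters") was removed in CUDA 5.0
 * (the HIP port has the same issue with hipMemcpyToSymbol), cudaThreadSynchronize() is
 * deprecated in favour of cudaDeviceSynchronize(), and texture references (cudaBindTexture)
 * were removed in CUDA 12. The sketch below shows only the symbol-based constant copy; it
 * assumes c_clusters is declared in kmeans_cuda_kernel.cu as a fixed-size __constant__ array
 * (the size bound used here is a hypothetical placeholder).
 */
// Assumed declaration in kmeans_cuda_kernel.cu (bound is a placeholder):
//   __constant__ float c_clusters[MAX_CLUSTERS_X_FEATURES];

// Pass the symbol itself rather than its name as a string:
cudaMemcpyToSymbol(c_clusters, clusters[0],
                   nclusters * nfeatures * sizeof(float),
                   0, cudaMemcpyHostToDevice);

// And replace the deprecated synchronization call after the kernel launch:
cudaDeviceSynchronize();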
8ff575dd5904623257df3fae8ae706d2d163fd43.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "ray_interpolated_projection.hpp" #include "TIGRE_common.hpp" #include <math.h> #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ hipDeviceReset();\ mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",hipGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
#define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } template<bool sphericalrotation> __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex){ unsigned long u = blockIdx.x * blockDim.x + threadIdx.x; unsigned long v = blockIdx.y * blockDim.y + threadIdx.y; unsigned long projNumber=threadIdx.z; if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK) return; #if IS_FOR_MATLAB_TIGRE size_t idx = (size_t)(u * geo.nDetecV + v)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; #else size_t idx = (size_t)(v * geo.nDetecU + u)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; #endif int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; float DSO = projFloatsArrayDev[2*projNumber+0]; float cropdist_init = projFloatsArrayDev[2*projNumber+1]; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-v-1; int pixelU = u; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer vectX=__fdividef(P.x -source.x,length); vectY=__fdividef(P.y -source.y,length); vectZ=__fdividef(P.z -source.z,length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid) // for the 3D case. However it would be bad to lose performance in the 3D case // TODO: can ge really improve this? 
if (sphericalrotation){ if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy); } else{ if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy); } //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+source.x; ty=vectY*i+source.y; tz=vectZ*i+source.z; sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time. } float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+ (vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } // legnth(angles)=3 x nagnles, as we have roll, pitch, yaw. int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){ // Prepare for MultiGPU int deviceCount = 0; hipGetDeviceCount(&deviceCount); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning trhown) int dev; const int devicenamelength = 256; // The length 256 is fixed by spec of hipDeviceProp_t::name char devicename[devicenamelength]; hipDeviceProp_t deviceProp; for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); hipGetDeviceProperties(&deviceProp, dev); if (dev>0){ if (strcmp(devicename,deviceProp.name)!=0){ mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275."); break; } } memset(devicename, 0, devicenamelength); strcpy(devicename, deviceProp.name); } // Check free memory size_t mem_GPU_global; checkFreeMemory(deviceCount,&mem_GPU_global); // printf("geo.nDetec (U, V) = %d, %d\n", geo.nDetecU, geo.nDetecV); size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float); // Does everything fit in the GPUs? const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global; unsigned int splits=1; if (!fits_in_memory) { // Nope nope. // approx free memory we have. We already have left some extra 5% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. 
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation } Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry)); splitImageInterp(splits,geo,geoArray,nangles); // Allocate auiliary memory for projections on the GPU to accumulate partial resutsl float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); hipMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); for (int i = 0; i < 2; ++i){ hipMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); hipMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("hipMalloc projections fail"); } } //Pagelock memory for syncronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes. int isHostRegisterSupported; hipDeviceGetAttribute(&isHostRegisterSupported,hipDeviceAttributeHostRegisterSupported,0); // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Syncronously launching the memcpys. This is only worth it when the image is too big. if (isHostRegisterSupported & splits>1){ hipHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),hipHostRegisterPortable); } cudaCheckErrors("Error pinning memory"); Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost = 0; hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); float* projFloatsArrayHost = 0; hipHostMalloc((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float)); cudaCheckErrors("Error allocating auxiliary constant memory"); // Create Streams for overlapping memcopy and compute int nStream_device=2; int nStreams=deviceCount*nStream_device; hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t)); for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); for (int i = 0; i < nStream_device; ++i){ hipStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. int projection_this_block; hipTextureObject_t *texImg = new hipTextureObject_t[deviceCount]; hipArray **d_cuArrTex = new hipArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; // They are all the same size, except the last one. 
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; float maxdist; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for (unsigned int i=0; i<noOfKernelCalls; i++) { for (dev=0;dev<deviceCount;dev++){ float is_spherical=0; hipSetDevice(dev); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geoArray[sp].alpha=angles[proj_global*3]; geoArray[sp].theta=angles[proj_global*3+1]; geoArray[sp].psi =angles[proj_global*3+2]; is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi); //precomute distances for faster execution maxdist=maxdistanceCuboid(geoArray[sp],proj_global); //Precompute per angle constant stuff for speed computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; projFloatsArrayHost[2*j]=geo.DSO[proj_global]; projFloatsArrayHost[2*j+1]=floor(maxdist); } hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]); hipMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[dev*nStream_device]); hipStreamSynchronize(stream[dev*nStream_device]); //TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles) if (!is_spherical){ hipLaunchKernelGGL(( kernelPixelDetector<false>), dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } else{ hipLaunchKernelGGL(( kernelPixelDetector<true>) , dim3(grid),dim3(block),0,stream[dev*nStream_device], geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) and start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. // This is done in 2 steps: // 1)copy previous results back into GPU // 2)accumulate with current results // The code to take them out is the same as when there are no splits needed if( !fits_in_memory&&sp>0) { // 1) grab previous results and put them in the auxiliary variable dProjection_accum for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? 
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; hipMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyHostToDevice,stream[dev*2+1]); } // 2) take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; hipStreamSynchronize(stream[dev*2+1]); // wait until copy is finished hipLaunchKernelGGL(( vecAddInPlaceInterp), dim3((geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS),dim3(MAXTREADS),0,stream[dev*2], dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // end accumulation case, where the image needs to be split // Now, lets get out the projections from the previous execution of the kernels. if (i>0) { for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); //Global index of FIRST projection on previous set on this GPU proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device; if (dev+1==deviceCount) { //is it the last device? // projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device if (i-1 < noOfKernelCallsLastDev) { // The previous set(block) was not empty. projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global); } else { // The previous set was empty. // This happens if deviceCount > PROJ_PER_BLOCK+1. // e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199. // e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7. break; } } else { projection_this_block=PROJ_PER_BLOCK; } hipMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); hipStreamSynchronize(stream[dev*2]); } } // End noOfKernelCalls (i) loop. // We still have the last set of projections to get out of GPUs for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // How many projections are left here? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) hipDeviceSynchronize(); //Not really necesary, but just in case, we los nothing. 
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)"); hipMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[dev*2+1]); } // Make sure everyone has done their bussiness before the next image split: for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); hipDeviceSynchronize(); } } // End image split loop. cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipDestroyTextureObject(texImg[dev]); hipFreeArray(d_cuArrTex[dev]); } delete[] texImg; texImg = 0; delete[] d_cuArrTex; d_cuArrTex = 0; // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipFree(dProjection[dev*2]); hipFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipFree(dProjection_accum[dev*2]); hipFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); hipHostFree(projParamsArrayHost); hipHostFree(projFloatsArrayHost); for (int i = 0; i < nStreams; ++i) hipStreamDestroy(stream[i]) ; if (isHostRegisterSupported & splits>1){ hipHostUnregister(img); } cudaCheckErrors("hipFree fail"); // hipDeviceReset(); return 0; } void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,bool allocate) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); if(allocate){ for (unsigned int i = 0; i < num_devices; i++){ hipSetDevice(i); //hipArray Descriptor hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); //cuda Array hipMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent); cudaCheckErrors("Texture memory allocation fail"); } } for (unsigned int i = 0; i < num_devices; i++){ hipMemcpy3DParms copyParams = {0}; hipSetDevice(i); //Array creation copyParams.srcPtr = make_hipPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[i]; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3DAsync(&copyParams); //cudaCheckErrors("Texture memory data copy fail"); //Array creation End } for (unsigned int i = 0; i < num_devices; i++){ hipSetDevice(i); hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_cuArrTex[i]; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = false; if (geo.accuracy>1){ texDescr.filterMode = hipFilterModePoint; geo.accuracy=1; } else{ texDescr.filterMode = hipFilterModeLinear; } texDescr.addressMode[0] = hipAddressModeBorder; texDescr.addressMode[1] = hipAddressModeBorder; texDescr.addressMode[2] = hipAddressModeBorder; texDescr.readMode = hipReadModeElementType; hipCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL); cudaCheckErrors("Texture object creation fail"); } } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ unsigned long 
splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. 
P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the detector coordinates to DOD (original position on real coordinate system: P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]); Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]); //2: Offset detector //S doesnt need to chagne //3: Rotate around RZ RY RZ Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x; Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i]; Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i]; Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i]; eulerZYZ(geo,&Pfinal); eulerZYZ(geo,&Pfinalu0); eulerZYZ(geo,&Pfinalv0); eulerZYZ(geo,&S); //3: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit. // Only wors for standard rotaiton, not aribtary axis rotation. float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S.x+=CORx; S.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S; } float maxdistanceCuboid(Geometry geo,unsigned int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY,maxCubZ; // Forgetting Z, compute mas distance: diagonal+offset maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX); maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY); maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ); float a,b; a=geo.DSO[i]/geo.dVoxelX; b=geo.DSO[i]/geo.dVoxelY; // As the return of this value is in "voxel space", the source may have an elliptical curve. // The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling. if (geo.theta==0.0f & geo.psi==0.0f) // Special case, it will make the code faster return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))- sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f); //TODO: think of more special cases? return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f); } void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void eulerZYZ(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. 
//______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: checkFreeMemory // // Description: check available memory on devices //______________________________________________________________________________ void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){ size_t memfree; size_t memtotal; for (int dev = 0; dev < deviceCount; dev++){ hipSetDevice(dev); hipMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
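/* The interpolation_projection host code in this file (and in its CUDA twin below) decides
 * whether the whole voxel volume fits on the GPU and, if not, how many Z-slabs ("splits")
 * to cut it into, based on the per-device free memory gathered by checkFreeMemory (which
 * keeps a 5% safety margin). The following is a standalone sketch of that bookkeeping only;
 * the problem sizes and variable names are made-up assumptions, not part of the original file. */
#include <cuda_runtime.h>
#include <cstdio>

int main() {
    size_t memfree = 0, memtotal = 0;
    cudaSetDevice(0);
    cudaMemGetInfo(&memfree, &memtotal);
    size_t mem_GPU_global = (size_t)((double)memfree * 0.95);      // same 5% headroom as checkFreeMemory

    // hypothetical problem size: 512^3 volume, 512x512 detector, 9 projections per batch
    const size_t nVoxel = 512ULL * 512ULL * 512ULL;
    const size_t nDetec = 512ULL * 512ULL;
    const size_t PROJ_PER_BLOCK = 9;
    size_t mem_image = nVoxel * sizeof(float);
    size_t mem_proj  = nDetec * sizeof(float);

    bool fits_in_memory = mem_image + 2 * PROJ_PER_BLOCK * mem_proj < mem_GPU_global;
    unsigned int splits = 1;
    if (!fits_in_memory) {
        // leave room for the per-batch projection buffers, then take ceil(mem_image / mem_free)
        size_t mem_free = mem_GPU_global - 4 * PROJ_PER_BLOCK * mem_proj;
        splits = (unsigned int)(mem_image / mem_free + 1);
    }
    printf("free: %zu B, image: %zu B -> %u split(s)\n", mem_GPU_global, mem_image, splits);
    return 0;
}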
8ff575dd5904623257df3fae8ae706d2d163fd43.cu
/*------------------------------------------------------------------------- * * CUDA functions for texture-memory interpolation based projection * * This file has the necesary fucntiosn to perform X-ray CBCT projection * operation given a geaometry, angles and image. It uses the 3D texture * memory linear interpolation to uniformily sample a path to integrate the * X-rays. * * CODE by Ander Biguri * Sepideh Hatamikia (arbitrary rotation) * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "ray_interpolated_projection.hpp" #include "TIGRE_common.hpp" #include <math.h> #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ cudaDeviceReset();\ mexErrMsgIdAndTxt("TIGRE:Ax:interpolated",cudaGetErrorString(__err));\ } \ } while (0) // Declare the texture reference. 
#define MAXTREADS 1024 #define PROJ_PER_BLOCK 9 #define PIXEL_SIZE_BLOCK 9 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * --->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate); __constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device __constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device __global__ void vecAddInPlaceInterp(float *a, float *b, unsigned long n) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // Make sure we do not go out of bounds if (idx < n) a[idx] = a[idx] + b[idx]; } template<bool sphericalrotation> __global__ void kernelPixelDetector( Geometry geo, float* detector, const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex){ unsigned long u = blockIdx.x * blockDim.x + threadIdx.x; unsigned long v = blockIdx.y * blockDim.y + threadIdx.y; unsigned long projNumber=threadIdx.z; if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK) return; #if IS_FOR_MATLAB_TIGRE size_t idx = (size_t)(u * geo.nDetecV + v)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; #else size_t idx = (size_t)(v * geo.nDetecU + u)+ (size_t)projNumber*geo.nDetecV *geo.nDetecU ; #endif int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array if(indAlpha>=totalNoOfProjections) return; Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaU = projParamsArrayDev[4*projNumber+1]; Point3D deltaV = projParamsArrayDev[4*projNumber+2]; Point3D source = projParamsArrayDev[4*projNumber+3]; float DSO = projFloatsArrayDev[2*projNumber+0]; float cropdist_init = projFloatsArrayDev[2*projNumber+1]; /////// Get coordinates XYZ of pixel UV int pixelV = geo.nDetecV-v-1; int pixelU = u; float vectX,vectY,vectZ; Point3D P; P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x); P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y); P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z); // Length is the ray length in normalized space float length=__fsqrt_rd((source.x-P.x)*(source.x-P.x)+(source.y-P.y)*(source.y-P.y)+(source.z-P.z)*(source.z-P.z)); //now legth is an integer of Nsamples that are required on this line length=ceilf(__fdividef(length,geo.accuracy));//Divide the directional vector by an integer vectX=__fdividef(P.x -source.x,length); vectY=__fdividef(P.y -source.y,length); vectZ=__fdividef(P.z -source.z,length); // //Integrate over the line float tx,ty,tz; float sum=0; float i; // Because I have no idea how to efficiently cutoff the legth path in 3D, a very upper limit is computed (see maxdistanceCuboid) // for the 3D case. However it would be bad to lose performance in the 3D case // TODO: can ge really improve this? 
if (sphericalrotation){ if ((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(fminf(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)+cropdist_init)/geo.accuracy); } else{ if ((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy < length) length=ceilf((2*DSO/fminf(geo.dVoxelX,geo.dVoxelY)+cropdist_init)/geo.accuracy); } //Length is not actually a length, but the amount of memreads with given accuracy ("samples per voxel") for (i=floorf(cropdist_init/geo.accuracy); i<=length; i=i+1){ tx=vectX*i+source.x; ty=vectY*i+source.y; tz=vectZ*i+source.z; sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time. } float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+ (vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+ (vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) ); detector[idx]=sum*deltalength; } // legnth(angles)=3 x nagnles, as we have roll, pitch, yaw. int interpolation_projection(float * img, Geometry geo, float** result,float const * const angles,int nangles){ // Prepare for MultiGPU int deviceCount = 0; cudaGetDeviceCount(&deviceCount); cudaCheckErrors("Device query fail"); if (deviceCount == 0) { mexErrMsgIdAndTxt("Ax:Interpolated_projection:GPUselect","There are no available device(s) that support CUDA\n"); } // // CODE assumes // 1.-All available devices are usable by this code // 2.-All available devices are equal, they are the same machine (warning trhown) int dev; const int devicenamelength = 256; // The length 256 is fixed by spec of cudaDeviceProp::name char devicename[devicenamelength]; cudaDeviceProp deviceProp; for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); cudaGetDeviceProperties(&deviceProp, dev); if (dev>0){ if (strcmp(devicename,deviceProp.name)!=0){ mexWarnMsgIdAndTxt("Ax:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. \n Siddon_projection.cu line 275."); break; } } memset(devicename, 0, devicenamelength); strcpy(devicename, deviceProp.name); } // Check free memory size_t mem_GPU_global; checkFreeMemory(deviceCount,&mem_GPU_global); // printf("geo.nDetec (U, V) = %d, %d\n", geo.nDetecU, geo.nDetecV); size_t mem_image=(unsigned long long)geo.nVoxelX*(unsigned long long)geo.nVoxelY*(unsigned long long)geo.nVoxelZ*sizeof(float); size_t mem_proj =(unsigned long long)geo.nDetecU*(unsigned long long)geo.nDetecV * sizeof(float); // Does everything fit in the GPUs? const bool fits_in_memory = mem_image+2*PROJ_PER_BLOCK*mem_proj<mem_GPU_global; unsigned int splits=1; if (!fits_in_memory) { // Nope nope. // approx free memory we have. We already have left some extra 5% free for internal stuff // we need a second projection memory to combine multi-GPU stuff. 
size_t mem_free=mem_GPU_global-4*PROJ_PER_BLOCK*mem_proj; splits=mem_image/mem_free+1;// Ceil of the truncation } Geometry* geoArray = (Geometry*)malloc(splits*sizeof(Geometry)); splitImageInterp(splits,geo,geoArray,nangles); // Allocate auiliary memory for projections on the GPU to accumulate partial resutsl float ** dProjection_accum; size_t num_bytes_proj = PROJ_PER_BLOCK*geo.nDetecU*geo.nDetecV * sizeof(float); if (!fits_in_memory){ dProjection_accum=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection_accum[dev*2+i], num_bytes_proj); cudaMemset(dProjection_accum[dev*2+i],0,num_bytes_proj); cudaCheckErrors("cudaMallocauxiliarty projections fail"); } } } // This is happening regarthless if the image fits on memory float** dProjection=(float**)malloc(2*deviceCount*sizeof(float*)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); for (int i = 0; i < 2; ++i){ cudaMalloc((void**)&dProjection[dev*2+i], num_bytes_proj); cudaMemset(dProjection[dev*2+i] ,0,num_bytes_proj); cudaCheckErrors("cudaMalloc projections fail"); } } //Pagelock memory for syncronous copy. // Lets try to make the host memory pinned: // We laredy queried the GPU and assuemd they are the same, thus shoudl have the same attributes. int isHostRegisterSupported; cudaDeviceGetAttribute(&isHostRegisterSupported,cudaDevAttrHostRegisterSupported,0); // empirical testing shows that when the image split is smaller than 1 (also implies the image is not very big), the time to // pin the memory is greater than the lost time in Syncronously launching the memcpys. This is only worth it when the image is too big. if (isHostRegisterSupported & splits>1){ cudaHostRegister(img, (size_t)geo.nVoxelX*(size_t)geo.nVoxelY*(size_t)geo.nVoxelZ*(size_t)sizeof(float),cudaHostRegisterPortable); } cudaCheckErrors("Error pinning memory"); Point3D source, deltaU, deltaV, uvOrigin; Point3D* projParamsArrayHost = 0; cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D)); float* projFloatsArrayHost = 0; cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float)); cudaCheckErrors("Error allocating auxiliary constant memory"); // Create Streams for overlapping memcopy and compute int nStream_device=2; int nStreams=deviceCount*nStream_device; cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t)); for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); for (int i = 0; i < nStream_device; ++i){ cudaStreamCreate(&stream[i+dev*nStream_device]); } } cudaCheckErrors("Stream creation fail"); int nangles_device=(nangles+deviceCount-1)/deviceCount; int nangles_last_device=(nangles-(deviceCount-1)*nangles_device); unsigned int noOfKernelCalls = (nangles_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK unsigned int noOfKernelCallsLastDev = (nangles_last_device+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // we will use this in the memory management. int projection_this_block; cudaTextureObject_t *texImg = new cudaTextureObject_t[deviceCount]; cudaArray **d_cuArrTex = new cudaArray*[deviceCount]; for (unsigned int sp=0;sp<splits;sp++){ // Create texture objects for all GPUs size_t linear_idx_start; // They are all the same size, except the last one. 
linear_idx_start= (size_t)sp*(size_t)geoArray[0].nVoxelX*(size_t)geoArray[0].nVoxelY*(size_t)geoArray[0].nVoxelZ; CreateTextureInterp(deviceCount,&img[linear_idx_start],geoArray[sp],d_cuArrTex,texImg,!sp); cudaCheckErrors("Texture object creation fail"); int divU,divV; divU=PIXEL_SIZE_BLOCK; divV=PIXEL_SIZE_BLOCK; dim3 grid((geoArray[sp].nDetecU+divU-1)/divU,(geoArray[0].nDetecV+divV-1)/divV,1); dim3 block(divU,divV,PROJ_PER_BLOCK); unsigned int proj_global; float maxdist; // Now that we have prepared the image (piece of image) and parameters for kernels // we project for all angles. for (unsigned int i=0; i<noOfKernelCalls; i++) { for (dev=0;dev<deviceCount;dev++){ float is_spherical=0; cudaSetDevice(dev); for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){ proj_global=(i*PROJ_PER_BLOCK+j)+dev*nangles_device; if (proj_global>=nangles) break; if ((i*PROJ_PER_BLOCK+j)>=nangles_device) break; geoArray[sp].alpha=angles[proj_global*3]; geoArray[sp].theta=angles[proj_global*3+1]; geoArray[sp].psi =angles[proj_global*3+2]; is_spherical+=abs(geoArray[sp].theta)+abs(geoArray[sp].psi); //precomute distances for faster execution maxdist=maxdistanceCuboid(geoArray[sp],proj_global); //Precompute per angle constant stuff for speed computeDeltas(geoArray[sp], proj_global, &uvOrigin, &deltaU, &deltaV, &source); //Ray tracing! projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[4*j+1]=deltaU; projParamsArrayHost[4*j+2]=deltaV; projParamsArrayHost[4*j+3]=source; projFloatsArrayHost[2*j]=geo.DSO[proj_global]; projFloatsArrayHost[2*j+1]=floor(maxdist); } cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]); cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[dev*nStream_device]); cudaStreamSynchronize(stream[dev*nStream_device]); //TODO: we could do this around X and Y axis too, but we would need to compute the new axis of rotation (not possible to know from jsut the angles) if (!is_spherical){ kernelPixelDetector<false><<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } else{ kernelPixelDetector<true> <<<grid,block,0,stream[dev*nStream_device]>>>(geoArray[sp],dProjection[(i%2)+dev*2],i,nangles_device,texImg[dev]); } } // Now that the computation is happening, we need to either prepare the memory for // combining of the projections (splits>1) and start removing previous results. // If our image does not fit in memory then we need to make sure we accumulate previous results too. // This is done in 2 steps: // 1)copy previous results back into GPU // 2)accumulate with current results // The code to take them out is the same as when there are no splits needed if( !fits_in_memory&&sp>0) { // 1) grab previous results and put them in the auxiliary variable dProjection_accum for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? 
projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; cudaMemcpyAsync(dProjection_accum[(i%2)+dev*2], result[proj_global], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyHostToDevice,stream[dev*2+1]); } // 2) take the results from current compute call and add it to the code in execution. for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=i*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // Unless its the last projection set, we have PROJ_PER_BLOCK angles. Otherwise... if(i+1==noOfKernelCalls) //is it the last block? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) else projection_this_block=PROJ_PER_BLOCK; cudaStreamSynchronize(stream[dev*2+1]); // wait until copy is finished vecAddInPlaceInterp<<<(geo.nDetecU*geo.nDetecV*projection_this_block+MAXTREADS-1)/MAXTREADS,MAXTREADS,0,stream[dev*2]>>>(dProjection[(i%2)+dev*2],dProjection_accum[(i%2)+dev*2],(unsigned long)geo.nDetecU*geo.nDetecV*projection_this_block); } } // end accumulation case, where the image needs to be split // Now, lets get out the projections from the previous execution of the kernels. if (i>0) { for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); //Global index of FIRST projection on previous set on this GPU proj_global=(i-1)*PROJ_PER_BLOCK+dev*nangles_device; if (dev+1==deviceCount) { //is it the last device? // projections assigned to this device is >=nangles_device-(deviceCount-1) and < nangles_device if (i-1 < noOfKernelCallsLastDev) { // The previous set(block) was not empty. projection_this_block=min(PROJ_PER_BLOCK, nangles-proj_global); } else { // The previous set was empty. // This happens if deviceCount > PROJ_PER_BLOCK+1. // e.g. PROJ_PER_BLOCK = 9, deviceCount = 11, nangles = 199. // e.g. PROJ_PER_BLOCK = 1, deviceCount = 3, nangles = 7. break; } } else { projection_this_block=PROJ_PER_BLOCK; } cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(i%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } } // Make sure Computation on kernels has finished before we launch the next batch. for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); cudaStreamSynchronize(stream[dev*2]); } } // End noOfKernelCalls (i) loop. // We still have the last set of projections to get out of GPUs for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); //Global index of FIRST projection on this set on this GPU proj_global=(noOfKernelCalls-1)*PROJ_PER_BLOCK+dev*nangles_device; if(proj_global>=nangles) break; // How many projections are left here? projection_this_block=min(nangles_device-(noOfKernelCalls-1)*PROJ_PER_BLOCK, //the remaining angles that this GPU had to do (almost never PROJ_PER_BLOCK) nangles-proj_global); //or whichever amount is left to finish all (this is for the last GPU) cudaDeviceSynchronize(); //Not really necesary, but just in case, we los nothing. 
cudaCheckErrors("Error at copying the last set of projections out (or in the previous copy)"); cudaMemcpyAsync(result[proj_global], dProjection[(int)(!(noOfKernelCalls%2))+dev*2], projection_this_block*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[dev*2+1]); } // Make sure everyone has done their bussiness before the next image split: for (dev = 0; dev < deviceCount; dev++) { cudaSetDevice(dev); cudaDeviceSynchronize(); } } // End image split loop. cudaCheckErrors("Main loop fail"); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaDestroyTextureObject(texImg[dev]); cudaFreeArray(d_cuArrTex[dev]); } delete[] texImg; texImg = 0; delete[] d_cuArrTex; d_cuArrTex = 0; // Freeing Stage for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaFree(dProjection[dev*2]); cudaFree(dProjection[dev*2+1]); } free(dProjection); if(!fits_in_memory){ for (dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaFree(dProjection_accum[dev*2]); cudaFree(dProjection_accum[dev*2+1]); } free(dProjection_accum); } freeGeoArray(splits,geoArray); cudaFreeHost(projParamsArrayHost); cudaFreeHost(projFloatsArrayHost); for (int i = 0; i < nStreams; ++i) cudaStreamDestroy(stream[i]) ; if (isHostRegisterSupported & splits>1){ cudaHostUnregister(img); } cudaCheckErrors("cudaFree fail"); // cudaDeviceReset(); return 0; } void CreateTextureInterp(int num_devices,const float* imagedata,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,bool allocate) { //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ; const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ); if(allocate){ for (unsigned int i = 0; i < num_devices; i++){ cudaSetDevice(i); //cudaArray Descriptor cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); //cuda Array cudaMalloc3DArray(&d_cuArrTex[i], &channelDesc, extent); cudaCheckErrors("Texture memory allocation fail"); } } for (unsigned int i = 0; i < num_devices; i++){ cudaMemcpy3DParms copyParams = {0}; cudaSetDevice(i); //Array creation copyParams.srcPtr = make_cudaPitchedPtr((void *)imagedata, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_cuArrTex[i]; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3DAsync(&copyParams); //cudaCheckErrors("Texture memory data copy fail"); //Array creation End } for (unsigned int i = 0; i < num_devices; i++){ cudaSetDevice(i); cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_cuArrTex[i]; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = false; if (geo.accuracy>1){ texDescr.filterMode = cudaFilterModePoint; geo.accuracy=1; } else{ texDescr.filterMode = cudaFilterModeLinear; } texDescr.addressMode[0] = cudaAddressModeBorder; texDescr.addressMode[1] = cudaAddressModeBorder; texDescr.addressMode[2] = cudaAddressModeBorder; texDescr.readMode = cudaReadModeElementType; cudaCreateTextureObject(&texImage[i], &texRes, &texDescr, NULL); cudaCheckErrors("Texture object creation fail"); } } /* This code generates the geometries needed to split the image properly in * cases where the entire image does not fit in the memory of the GPU **/ void splitImageInterp(unsigned int splits,Geometry geo,Geometry* geoArray, unsigned int nangles){ 
unsigned long splitsize=(geo.nVoxelZ+splits-1)/splits;// ceil if not divisible for(unsigned int sp=0;sp<splits;sp++){ geoArray[sp]=geo; // All of them are splitsize, but the last one, possible geoArray[sp].nVoxelZ=((sp+1)*splitsize<geo.nVoxelZ)? splitsize: geo.nVoxelZ-splitsize*sp; geoArray[sp].sVoxelZ= geoArray[sp].nVoxelZ* geoArray[sp].dVoxelZ; // We need to redefine the offsets, as now each subimage is not aligned in the origin. geoArray[sp].offOrigZ=(float *)malloc(nangles*sizeof(float)); for (unsigned int i=0;i<nangles;i++){ geoArray[sp].offOrigZ[i]=geo.offOrigZ[i]-geo.sVoxelZ/2+sp*geoArray[0].sVoxelZ+geoArray[sp].sVoxelZ/2; } } } /* This code precomputes The location of the source and the Delta U and delta V (in the warped space) * to compute the locations of the x-rays. While it seems verbose and overly-optimized, * it does saves about 30% of each of the kernel calls. Thats something! **/ void computeDeltas(Geometry geo,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){ Point3D S; S.x=geo.DSO[i]; S.y=0; S.z=0; //End point Point3D P,Pu0,Pv0; P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0); Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1); // Geomtric trasnformations: // Now we have the Real world (OXYZ) coordinates of the bottom corner and its two neighbours. // The obkjective is to get a position of the detector in a coordinate system where: // 1-units are voxel size (in each direction can be different) // 2-The image has the its first voxel at (0,0,0) // 3-The image never rotates // To do that, we need to compute the "deltas" the detector, or "by how much // (in new xyz) does the voxels change when and index is added". To do that // several geometric steps needs to be changed //1.Roll,pitch,jaw // The detector can have a small rotation. // according to //"A geometric calibration method for cone beam CT systems" Yang K1, Kwan AL, Miller DF, Boone JM. Med Phys. 2006 Jun;33(6):1695-706. // Only the Z rotation will have a big influence in the image quality when they are small. // Still all rotations are supported // To roll pitch jaw, the detector has to be in centered in OXYZ. 
P.x=0;Pu0.x=0;Pv0.x=0; // Roll pitch yaw rollPitchYaw(geo,i,&P); rollPitchYaw(geo,i,&Pu0); rollPitchYaw(geo,i,&Pv0); //Now ltes translate the detector coordinates to DOD (original position on real coordinate system: P.x=P.x-(geo.DSD[i]-geo.DSO[i]); Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]); Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]); //2: Offset detector //S doesnt need to chagne //3: Rotate around RZ RY RZ Point3D Pfinal, Pfinalu0, Pfinalv0; Pfinal.x =P.x; Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i]; Pfinalu0.x=Pu0.x; Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i]; Pfinalv0.x=Pv0.x; Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i]; eulerZYZ(geo,&Pfinal); eulerZYZ(geo,&Pfinalu0); eulerZYZ(geo,&Pfinalv0); eulerZYZ(geo,&S); //3: Offset image (instead of offseting image, -offset everything else) Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i]; Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i]; Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i]; S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i]; // As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation); Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2; S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2; //4. Scale everything so dVoxel==1 Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ; Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ; Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ; S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ; //mexPrintf("COR: %f \n",geo.COR[i]); //5. apply COR. Wherever everything was, now its offesetd by a bit. // Only wors for standard rotaiton, not aribtary axis rotation. float CORx, CORy; CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX; CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY; Pfinal.x+=CORx; Pfinal.y+=CORy; Pfinalu0.x+=CORx; Pfinalu0.y+=CORy; Pfinalv0.x+=CORx; Pfinalv0.y+=CORy; S.x+=CORx; S.y+=CORy; // return *uvorigin=Pfinal; deltaU->x=Pfinalu0.x-Pfinal.x; deltaU->y=Pfinalu0.y-Pfinal.y; deltaU->z=Pfinalu0.z-Pfinal.z; deltaV->x=Pfinalv0.x-Pfinal.x; deltaV->y=Pfinalv0.y-Pfinal.y; deltaV->z=Pfinalv0.z-Pfinal.z; *source=S; } float maxdistanceCuboid(Geometry geo,unsigned int i){ /////////// // Compute initial "t" so we access safely as less as out of bounds as possible. 
////////// float maxCubX,maxCubY,maxCubZ; // Forgetting Z, compute mas distance: diagonal+offset maxCubX=(geo.nVoxelX/2+ abs(geo.offOrigX[i])/geo.dVoxelX); maxCubY=(geo.nVoxelY/2+ abs(geo.offOrigY[i])/geo.dVoxelY); maxCubZ=(geo.nVoxelZ/2+ abs(geo.offOrigZ[i])/geo.dVoxelZ); float a,b; a=geo.DSO[i]/geo.dVoxelX; b=geo.DSO[i]/geo.dVoxelY; // As the return of this value is in "voxel space", the source may have an elliptical curve. // The distance returned is the safe distance that can be skipped for a given angle alpha, before we need to start sampling. if (geo.theta==0.0f & geo.psi==0.0f) // Special case, it will make the code faster return max(a*b/sqrt(a*a*sin(geo.alpha)*sin(geo.alpha)+b*b*cos(geo.alpha)*cos(geo.alpha))- sqrt(maxCubX*maxCubX+maxCubY*maxCubY),0.0f); //TODO: think of more special cases? return max(geo.DSO[i]/max(max(geo.dVoxelX,geo.dVoxelY),geo.dVoxelZ)-sqrt(maxCubX*maxCubX+maxCubY*maxCubY+maxCubZ*maxCubZ),0.0f); } void rollPitchYaw(Geometry geo,unsigned int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->y=sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.z; point->z=-sin(geo.dPitch[i])*auxPoint.x +cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; } void eulerZYZ(Geometry geo, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; } //______________________________________________________________________________ // // Function: freeGeoArray // // Description: Frees the memory from the geometry array for multiGPU. 
//______________________________________________________________________________ void freeGeoArray(unsigned int splits,Geometry* geoArray){ for(unsigned int sp=0;sp<splits;sp++){ free(geoArray[sp].offOrigZ); } free(geoArray); } //______________________________________________________________________________ // // Function: checkFreeMemory // // Description: check available memory on devices //______________________________________________________________________________ void checkFreeMemory(int deviceCount,size_t *mem_GPU_global){ size_t memfree; size_t memtotal; for (int dev = 0; dev < deviceCount; dev++){ cudaSetDevice(dev); cudaMemGetInfo(&memfree,&memtotal); if(dev==0) *mem_GPU_global=memfree; if(memfree<memtotal/2){ mexErrMsgIdAndTxt("tvDenoise:tvdenoising:GPU","One (or more) of your GPUs is being heavily used by another program (possibly graphics-based).\n Free the GPU to run TIGRE\n"); } cudaCheckErrors("Check mem error"); *mem_GPU_global=(memfree<*mem_GPU_global)?memfree:*mem_GPU_global; } *mem_GPU_global=(size_t)((double)*mem_GPU_global*0.95); //*mem_GPU_global= insert your known number here, in bytes. }
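/* The interpolation_projection driver in this file overlaps compute and device-to-host copies
 * by double-buffering: each device owns two projection buffers and two streams, batch i is
 * computed into buffer i%2 on the compute stream while the previous batch in buffer (i-1)%2
 * is drained to the host on the copy stream. Below is a minimal self-contained sketch of that
 * ping-pong pattern; the kernel, sizes and names are stand-ins (assumptions), not taken from
 * the file above. */
#include <cuda_runtime.h>
#include <cstdio>

__global__ void fill(float* out, int n, float value) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = value;                                   // stand-in for the projection kernel
}

int main() {
    const int n = 1 << 20, nBatches = 8;
    float* dBuf[2];
    float* hOut;
    cudaStream_t compute, copy;
    cudaMalloc((void**)&dBuf[0], n * sizeof(float));
    cudaMalloc((void**)&dBuf[1], n * sizeof(float));
    cudaMallocHost((void**)&hOut, (size_t)nBatches * n * sizeof(float)); // pinned, so async copies can overlap
    cudaStreamCreate(&compute);
    cudaStreamCreate(&copy);

    for (int i = 0; i < nBatches; ++i) {
        cudaStreamSynchronize(copy);                             // no copy may still be reading dBuf[i%2]
        fill<<<(n + 255) / 256, 256, 0, compute>>>(dBuf[i % 2], n, (float)i);
        if (i > 0)                                               // drain the previous batch concurrently
            cudaMemcpyAsync(hOut + (size_t)(i - 1) * n, dBuf[(i - 1) % 2],
                            n * sizeof(float), cudaMemcpyDeviceToHost, copy);
        cudaStreamSynchronize(compute);                          // batch i must finish before it becomes "previous"
    }
    cudaMemcpyAsync(hOut + (size_t)(nBatches - 1) * n, dBuf[(nBatches - 1) % 2],
                    n * sizeof(float), cudaMemcpyDeviceToHost, copy);
    cudaStreamSynchronize(copy);                                 // last batch out
    printf("last value: %f\n", hOut[(size_t)nBatches * n - 1]);

    cudaFree(dBuf[0]); cudaFree(dBuf[1]);
    cudaFreeHost(hOut);
    cudaStreamDestroy(compute); cudaStreamDestroy(copy);
    return 0;
}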
da89d347cb606672d21ddb722e3fe1283ae0aae1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <ctime> #define CheckErrorUtil(err) CheckError(err, __FUNCTION__, __LINE__) #define CheckErrorMsgUtil(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__) inline void CheckError(hipError_t const err, char const* const fun, const int line) { if (err) { printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n", err, hipGetErrorString(err), fun, line); exit(1); } } inline void CheckErrorMsg(hipError_t const err, char const* const msg, char const* const fun, int const line) { if (err) { printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n%s\n", err, hipGetErrorString(err), fun, line, msg); exit(1); } } void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref); void CompareArrays(int const N, float const* const a, float const* const b); __global__ void ArraysSum(float* const a, float* const b, float* const c, int const N) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) c[i] = a[i] + b[i]; } void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref) { int i; srand((unsigned)time(NULL)); for(i = 0; i < N; i++) { a[i] = (float)rand(); b[i] = (float)rand(); c[i] = 0.0f; ref[i] = a[i] + b[i]; } } void CompareArrays(int const N, float const* const a, float const* const b) { int i; int different = 0; for(i = 0; i < N; i++) { different = (a[i] != b[i]); if(different) break; } if(different) { printf("\n\n*** Oh No! The GPU failed to sum the arrays. ***\n\n\n"); } else { printf("\n\n*** Awesome! The GPU summed the arrays!! ***\n\n\n"); } } int main() { std::clock_t start; start = std::clock(); dim3 gridSize; dim3 blockSize; int const N = 4096000; size_t const N_BYTES = N * sizeof(float); int const BLOCK_SIZE = 512; float *aH, *bH, *cH, *refH; float *aD, *bD, *cD; aH = (float*)malloc(N_BYTES); bH = (float*)malloc(N_BYTES); cH = (float*)malloc(N_BYTES); refH = (float*)malloc(N_BYTES); printf("\n\nGenerating 2 random float arrays on Host - each of size %lu bytes...\n", N_BYTES); GenerateTestArrays(N, aH, bH, cH, refH); printf("Allocating %lu bytes on Device GPU to store the 2 generated arrays...\n", 2 * N_BYTES); CheckErrorUtil(hipMalloc((void**)&aD, N_BYTES)); CheckErrorUtil(hipMalloc((void**)&bD, N_BYTES)); printf("Allocating %lu bytes on Device GPU to store the result array after summing the 2 arrays...\n", N_BYTES); CheckErrorUtil(hipMalloc((void**)&cD, N_BYTES)); // Use CUDA streams to manage the concurrency of copying and executing hipStream_t stream; hipStreamCreate(&stream); printf("Copying 2 arrays from Host to Device GPU using Streams...\n"); hipMemcpyAsync(aD, aH, N_BYTES, hipMemcpyHostToDevice, stream); hipMemcpyAsync(bD, bH, N_BYTES, hipMemcpyHostToDevice, stream); blockSize.x = BLOCK_SIZE; blockSize.y = 1; blockSize.z = 1; gridSize.x = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE); gridSize.y = 1; gridSize.z = 1; printf("Summing the 2 arrays and storing the result array on Device GPU...\n"); hipLaunchKernelGGL(( ArraysSum), dim3(gridSize), dim3(blockSize), 0, 0, aD, bD, cD, N); printf("Synchronizing the Device GPU memory before copying the result array back to Host...\n"); CheckErrorUtil(hipStreamSynchronize(stream)); printf("Copying result array from Device GPU to Host...\n"); CheckErrorUtil(hipMemcpyAsync(cH, cD, N_BYTES, hipMemcpyDeviceToHost, stream)); printf("Comparing expected result array stored on Host with actual result calculated on Device GPU...\n"); 
CompareArrays(N, cH, refH); printf("Freeing %lu bytes on Device GPU...\n", 3 * N_BYTES); CheckErrorUtil(hipFree(aD)); CheckErrorUtil(hipFree(bD)); CheckErrorUtil(hipFree(cD)); printf("Freeing memory on Host...\n"); free(aH); free(bH); free(cH); free(refH); printf("Resetting Device GPU as though nothing ever happened!\n\n"); hipDeviceReset(); printf("Executed in %.f milliseconds.\n\n", (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000)); return 0; }
da89d347cb606672d21ddb722e3fe1283ae0aae1.cu
#include <cuda_runtime.h> #include <stdio.h> #include <time.h> #include <stdlib.h> #include <ctime> #define CheckErrorUtil(err) CheckError(err, __FUNCTION__, __LINE__) #define CheckErrorMsgUtil(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__) inline void CheckError(cudaError_t const err, char const* const fun, const int line) { if (err) { printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n", err, cudaGetErrorString(err), fun, line); exit(1); } } inline void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line) { if (err) { printf("CUDA Error Code[%d]: %s\n%s() Line:%d\n%s\n", err, cudaGetErrorString(err), fun, line, msg); exit(1); } } void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref); void CompareArrays(int const N, float const* const a, float const* const b); __global__ void ArraysSum(float* const a, float* const b, float* const c, int const N) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) c[i] = a[i] + b[i]; } void GenerateTestArrays(int const N, float* const a, float* const b, float* const c, float* const ref) { int i; srand((unsigned)time(NULL)); for(i = 0; i < N; i++) { a[i] = (float)rand(); b[i] = (float)rand(); c[i] = 0.0f; ref[i] = a[i] + b[i]; } } void CompareArrays(int const N, float const* const a, float const* const b) { int i; int different = 0; for(i = 0; i < N; i++) { different = (a[i] != b[i]); if(different) break; } if(different) { printf("\n\n*** Oh No! The GPU failed to sum the arrays. ***\n\n\n"); } else { printf("\n\n*** Awesome! The GPU summed the arrays!! ***\n\n\n"); } } int main() { std::clock_t start; start = std::clock(); dim3 gridSize; dim3 blockSize; int const N = 4096000; size_t const N_BYTES = N * sizeof(float); int const BLOCK_SIZE = 512; float *aH, *bH, *cH, *refH; float *aD, *bD, *cD; aH = (float*)malloc(N_BYTES); bH = (float*)malloc(N_BYTES); cH = (float*)malloc(N_BYTES); refH = (float*)malloc(N_BYTES); printf("\n\nGenerating 2 random float arrays on Host - each of size %lu bytes...\n", N_BYTES); GenerateTestArrays(N, aH, bH, cH, refH); printf("Allocating %lu bytes on Device GPU to store the 2 generated arrays...\n", 2 * N_BYTES); CheckErrorUtil(cudaMalloc((void**)&aD, N_BYTES)); CheckErrorUtil(cudaMalloc((void**)&bD, N_BYTES)); printf("Allocating %lu bytes on Device GPU to store the result array after summing the 2 arrays...\n", N_BYTES); CheckErrorUtil(cudaMalloc((void**)&cD, N_BYTES)); // Use CUDA streams to manage the concurrency of copying and executing cudaStream_t stream; cudaStreamCreate(&stream); printf("Copying 2 arrays from Host to Device GPU using Streams...\n"); cudaMemcpyAsync(aD, aH, N_BYTES, cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(bD, bH, N_BYTES, cudaMemcpyHostToDevice, stream); blockSize.x = BLOCK_SIZE; blockSize.y = 1; blockSize.z = 1; gridSize.x = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE); gridSize.y = 1; gridSize.z = 1; printf("Summing the 2 arrays and storing the result array on Device GPU...\n"); ArraysSum<<<gridSize, blockSize>>>(aD, bD, cD, N); printf("Synchronizing the Device GPU memory before copying the result array back to Host...\n"); CheckErrorUtil(cudaStreamSynchronize(stream)); printf("Copying result array from Device GPU to Host...\n"); CheckErrorUtil(cudaMemcpyAsync(cH, cD, N_BYTES, cudaMemcpyDeviceToHost, stream)); printf("Comparing expected result array stored on Host with actual result calculated on Device GPU...\n"); CompareArrays(N, cH, refH); printf("Freeing %lu bytes on Device GPU...\n", 3 * 
N_BYTES); CheckErrorUtil(cudaFree(aD)); CheckErrorUtil(cudaFree(bD)); CheckErrorUtil(cudaFree(cD)); printf("Freeing memory on Host...\n"); free(aH); free(bH); free(cH); free(refH); printf("Resetting Device GPU as though nothing ever happened!\n\n"); cudaDeviceReset(); printf("Executed in %.f milliseconds.\n\n", (std::clock() - start) / (double)(CLOCKS_PER_SEC / 1000)); return 0; }
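/* Design note on the arrays-sum example above (both the .hip and .cu variants): the host
 * arrays are allocated with plain malloc(), so the cudaMemcpyAsync calls are staged through
 * an internal pinned buffer and generally will not overlap with kernel execution; true
 * copy/compute overlap needs page-locked host memory. A minimal sketch of the same host
 * allocations done with pinned memory follows -- illustrative only, these helpers are not
 * part of the original file. */
#include <cuda_runtime.h>

int allocate_pinned(float** aH, float** bH, float** cH, size_t nBytes) {
    // cudaMallocHost returns page-locked memory, which lets cudaMemcpyAsync run truly asynchronously
    if (cudaMallocHost((void**)aH, nBytes) != cudaSuccess) return 1;
    if (cudaMallocHost((void**)bH, nBytes) != cudaSuccess) return 1;
    if (cudaMallocHost((void**)cH, nBytes) != cudaSuccess) return 1;
    return 0;
}

void free_pinned(float* aH, float* bH, float* cH) {
    cudaFreeHost(aH);   // pinned allocations must be released with cudaFreeHost, not free()
    cudaFreeHost(bH);
    cudaFreeHost(cH);
}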
dadc2b126d3941664d3c038ca95aecb531879e51.hip
// !!! This is a file automatically generated by hipify!!! #include "reduce_hip.cuh" #include <cstdio> #include <cstdlib> #include <iostream> #include <new> int main(int argc, char *argv[]) { int N = atoi(argv[1]); int threads_per_block = atoi(argv[2]); int *arr = new (std::nothrow) int[N]; for (int i = 0; i < N; ++i) { arr[i] = 1; } // memset(arr, 1, sizeof(arr)); WRONG !! // std::cout << "main arr[0] = " << arr[0] << '\n'; hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); int sum = reduce(arr, N, threads_per_block); hipEventRecord(stop); hipEventSynchronize(stop); float ms; hipEventElapsedTime(&ms, start, stop); std::cout << sum << '\n'; std::cout << ms << '\n'; delete[] arr; return 0; }
dadc2b126d3941664d3c038ca95aecb531879e51.cu
#include "reduce.cuh" #include <cstdio> #include <cstdlib> #include <iostream> #include <new> int main(int argc, char *argv[]) { int N = atoi(argv[1]); int threads_per_block = atoi(argv[2]); int *arr = new (std::nothrow) int[N]; for (int i = 0; i < N; ++i) { arr[i] = 1; } // memset(arr, 1, sizeof(arr)); WRONG !! // std::cout << "main arr[0] = " << arr[0] << '\n'; cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); int sum = reduce(arr, N, threads_per_block); cudaEventRecord(stop); cudaEventSynchronize(stop); float ms; cudaEventElapsedTime(&ms, start, stop); std::cout << sum << '\n'; std::cout << ms << '\n'; delete[] arr; return 0; }
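/* reduce.cuh is included by the file above but its contents are not part of this dump.
 * The sketch below is an assumption, not the original header: one possible implementation
 * of a reduce(int* host_arr, int N, int threads_per_block) matching the signature used in
 * main(), built as a shared-memory tree reduction applied repeatedly until one value remains. */
#include <cuda_runtime.h>

__global__ void block_sum(const int* in, int* out, unsigned int n) {
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? in[i] : 0;                  // load one element per thread, pad with 0
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction in shared memory
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) out[blockIdx.x] = sdata[0];          // one partial sum per block
}

int reduce(int* arr, int N, int threads_per_block) {
    // assumes threads_per_block is a power of two, as in the usual invocation of this program
    int *d_in, *d_out;
    int blocks = (N + threads_per_block - 1) / threads_per_block;
    cudaMalloc((void**)&d_in, N * sizeof(int));
    cudaMalloc((void**)&d_out, blocks * sizeof(int));
    cudaMemcpy(d_in, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    unsigned int n = (unsigned int)N;
    while (n > 1) {                                    // keep reducing the partial sums
        blocks = (int)((n + threads_per_block - 1) / threads_per_block);
        block_sum<<<blocks, threads_per_block, threads_per_block * sizeof(int)>>>(d_in, d_out, n);
        cudaMemcpy(d_in, d_out, blocks * sizeof(int), cudaMemcpyDeviceToDevice);
        n = (unsigned int)blocks;
    }
    int sum = 0;
    cudaMemcpy(&sum, d_in, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    return sum;
}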
febe274cbbb549f6fe3a56b0c48b4c15ccbd3147.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void square_elements(float* in, float* out, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if ( idx < N) out[idx]=in[idx]*in[idx]; } /* Gateway function */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); for (i = 0; i < nrhs; i++) { /* Find the dimensions of the data */ m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); /* Create an mxArray for the output data */ plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); /* Create an input and output data array on the GPU*/ hipMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); hipMalloc( (void **) &data2f_gpu,sizeof(float)*m*n); /* Retrieve the input data */ data1 = mxGetPr(prhs[i]); /* Check if the input array is single or double precision */ category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { /* The input array is single precision, it can be sent directly to the card */ hipMemcpy( data1f_gpu, data1, sizeof(float)*m*n, hipMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } hipMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, hipMemcpyHostToDevice); } data2f = (float *) mxMalloc(sizeof(float)*m*n); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; /* Call function on GPU */hipLaunchKernelGGL(( square_elements), dim3(dimGrid),dim3(dimBlock), 0, 0, data1f_gpu, data2f_gpu, n*m); /* Copy result back to host */ hipMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, hipMemcpyDeviceToHost); /* Create a pointer to the output data */ data2 = mxGetPr(plhs[i]); /* Convert from single to double before returning */ for (j = 0; j < m*n; j++) { data2[j] = (double) data2f[j]; } /* Clean-up memory on device and host */ mxFree(data1f); mxFree(data2f); hipFree(data1f_gpu); hipFree(data2f_gpu); } }
febe274cbbb549f6fe3a56b0c48b4c15ccbd3147.cu
#include "cuda.h" #include "mex.h" /* Kernel to square elements of the array on the GPU */ __global__ void square_elements(float* in, float* out, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if ( idx < N) out[idx]=in[idx]*in[idx]; } /* Gateway function */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { int i, j, m, n; double *data1, *data2; float *data1f, *data2f; float *data1f_gpu, *data2f_gpu; mxClassID category; if (nrhs != nlhs) mexErrMsgTxt("The number of input and output arguments must be the same."); for (i = 0; i < nrhs; i++) { /* Find the dimensions of the data */ m = mxGetM(prhs[i]); n = mxGetN(prhs[i]); /* Create an mxArray for the output data */ plhs[i] = mxCreateDoubleMatrix(m, n, mxREAL); /* Create an input and output data array on the GPU*/ cudaMalloc( (void **) &data1f_gpu,sizeof(float)*m*n); cudaMalloc( (void **) &data2f_gpu,sizeof(float)*m*n); /* Retrieve the input data */ data1 = mxGetPr(prhs[i]); /* Check if the input array is single or double precision */ category = mxGetClassID(prhs[i]); if( category == mxSINGLE_CLASS) { /* The input array is single precision, it can be sent directly to the card */ cudaMemcpy( data1f_gpu, data1, sizeof(float)*m*n, cudaMemcpyHostToDevice); } if( category == mxDOUBLE_CLASS) { /* The input array is in double precision, it needs to be converted t floats before being sent to the card */ data1f = (float *) mxMalloc(sizeof(float)*m*n); for (j = 0; j < m*n; j++) { data1f[j] = (float) data1[j]; } cudaMemcpy( data1f_gpu, data1f, sizeof(float)*n*m, cudaMemcpyHostToDevice); } data2f = (float *) mxMalloc(sizeof(float)*m*n); /* Compute execution configuration using 128 threads per block */ dim3 dimBlock(128); dim3 dimGrid((m*n)/dimBlock.x); if ( (n*m) % 128 !=0 ) dimGrid.x+=1; /* Call function on GPU */ square_elements<<<dimGrid,dimBlock>>>(data1f_gpu, data2f_gpu, n*m); /* Copy result back to host */ cudaMemcpy( data2f, data2f_gpu, sizeof(float)*n*m, cudaMemcpyDeviceToHost); /* Create a pointer to the output data */ data2 = mxGetPr(plhs[i]); /* Convert from single to double before returning */ for (j = 0; j < m*n; j++) { data2[j] = (double) data2f[j]; } /* Clean-up memory on device and host */ mxFree(data1f); mxFree(data2f); cudaFree(data1f_gpu); cudaFree(data2f_gpu); } }
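/* Two notes on the MEX gateway above. First, when the input is mxSINGLE_CLASS the conversion
 * buffer data1f is never assigned by mxMalloc, yet the cleanup still calls mxFree(data1f) on
 * an uninitialised pointer; initialising data1f to NULL each iteration and freeing it only on
 * the double-precision path would avoid that. Second, the kernel itself is independent of MEX;
 * the standalone driver below (an illustration, not part of the original file) exercises it
 * with plain ceiling division for the grid instead of the "+1 if remainder" pattern. */
#include <cuda_runtime.h>
#include <cstdio>

__global__ void square_elements(float* in, float* out, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) out[idx] = in[idx] * in[idx];
}

int main() {
    const int N = 1000;                       // deliberately not a multiple of the block size
    float h[N], r[N];
    for (int i = 0; i < N; ++i) h[i] = (float)i;
    float *dIn, *dOut;
    cudaMalloc((void**)&dIn, N * sizeof(float));
    cudaMalloc((void**)&dOut, N * sizeof(float));
    cudaMemcpy(dIn, h, N * sizeof(float), cudaMemcpyHostToDevice);
    dim3 block(128);
    dim3 grid((N + block.x - 1) / block.x);   // ceiling division covers the partial last block
    square_elements<<<grid, block>>>(dIn, dOut, N);
    cudaMemcpy(r, dOut, N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("r[%d] = %f (expected %f)\n", N - 1, r[N - 1], (float)(N - 1) * (N - 1));
    cudaFree(dIn);
    cudaFree(dOut);
    return 0;
}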
ca70b0b21425b93474503015721a8312cf50a250.hip
// !!! This is a file automatically generated by hipify!!! #include "IncoherentSumPdf.hh" #include "devcomplex.hh" #include "ResonancePdf.hh" const int resonanceOffset_incoherent = 4; // Offset of the first resonance into the parameter index array. // Notice that this is different from the TddpPdf case because there's no time information. // In particular the offset consists of nP, constant index, number of resonances, and cache index. MEM_DEVICE devcomplex<fptype>* cResonanceValues[10]; EXEC_TARGET inline int parIndexFromResIndex_incoherent (int resIndex) { return resonanceOffset_incoherent + resIndex*resonanceSize; } EXEC_TARGET fptype device_incoherent (fptype* evt, fptype* p, unsigned int* indices) { // Calculates the incoherent sum over the resonances. int evtNum = (int) FLOOR(0.5 + evt[indices[4 + indices[0]]]); fptype ret = 0; unsigned int numResonances = indices[2]; unsigned int cacheToUse = indices[3]; for (int i = 0; i < numResonances; ++i) { int paramIndex = parIndexFromResIndex_incoherent(i); fptype amplitude = p[indices[paramIndex+0]]; devcomplex<fptype> matrixelement = cResonanceValues[cacheToUse][evtNum*numResonances + i]; ret += amplitude * matrixelement.abs2(); } // Multiply by efficiency int effFunctionIdx = parIndexFromResIndex_incoherent(numResonances); fptype eff = callFunction(evt, indices[effFunctionIdx], indices[effFunctionIdx + 1]); ret *= eff; return ret; } MEM_DEVICE device_function_ptr ptr_to_incoherent = device_incoherent; __host__ IncoherentSumPdf::IncoherentSumPdf (std::string n, Variable* m12, Variable* m13, Variable* eventNumber, DecayInfo* decay, GooPdf* eff) : GooPdf(0, n) , decayInfo(decay) , _m12(m12) , _m13(m13) , dalitzNormRange(0) , cachedResonances(0) , integrals(0) , forceRedoIntegrals(true) , totalEventSize(3) // Default 3 = m12, m13, evtNum. Will likely be overridden. , cacheToUse(0) , efficiency(eff) , integrators(0) , calculators(0) { registerObservable(_m12); registerObservable(_m13); registerObservable(eventNumber); std::vector<unsigned int> pindices; pindices.push_back(registerConstants(5)); fptype decayConstants[5]; decayConstants[0] = decayInfo->motherMass; decayConstants[1] = decayInfo->daug1Mass; decayConstants[2] = decayInfo->daug2Mass; decayConstants[3] = decayInfo->daug3Mass; decayConstants[4] = decayInfo->meson_radius; MEMCPY_TO_SYMBOL(functorConstants, decayConstants, 5*sizeof(fptype), cIndex*sizeof(fptype), hipMemcpyHostToDevice); pindices.push_back(decayInfo->resonances.size()); static int cacheCount = 0; cacheToUse = cacheCount++; pindices.push_back(cacheToUse); for (std::vector<ResonancePdf*>::iterator res = decayInfo->resonances.begin(); res != decayInfo->resonances.end(); ++res) { pindices.push_back(registerParameter((*res)->amp_real)); pindices.push_back(registerParameter((*res)->amp_real)); // Not going to use amp_imag, but need a dummy index so the resonance size will be consistent. 
pindices.push_back((*res)->getFunctionIndex()); pindices.push_back((*res)->getParameterIndex()); (*res)->setConstantIndex(cIndex); components.push_back(*res); } pindices.push_back(efficiency->getFunctionIndex()); pindices.push_back(efficiency->getParameterIndex()); components.push_back(efficiency); GET_FUNCTION_ADDR(ptr_to_incoherent); initialise(pindices); redoIntegral = new bool[decayInfo->resonances.size()]; cachedMasses = new fptype[decayInfo->resonances.size()]; cachedWidths = new fptype[decayInfo->resonances.size()]; integrals = new double[decayInfo->resonances.size()]; for (int i = 0; i < decayInfo->resonances.size(); ++i) { redoIntegral[i] = true; cachedMasses[i] = -1; cachedWidths[i] = -1; integrals[i] = 0; } integrators = new SpecialIncoherentIntegrator*[decayInfo->resonances.size()]; calculators = new SpecialIncoherentResonanceCalculator*[decayInfo->resonances.size()]; for (int i = 0; i < decayInfo->resonances.size(); ++i) { integrators[i] = new SpecialIncoherentIntegrator(parameters, i); calculators[i] = new SpecialIncoherentResonanceCalculator(parameters, i); } addSpecialMask(PdfBase::ForceSeparateNorm); } __host__ void IncoherentSumPdf::setDataSize (unsigned int dataSize, unsigned int evtSize) { // Default 3 is m12, m13, evtNum totalEventSize = evtSize; assert(totalEventSize >= 3); if (cachedResonances) { delete cachedResonances; } numEntries = dataSize; cachedResonances = new DEVICE_VECTOR<devcomplex<fptype> >(dataSize*decayInfo->resonances.size()); void* dummy = thrust::raw_pointer_cast(cachedResonances->data()); MEMCPY_TO_SYMBOL(cResonanceValues, &dummy, sizeof(devcomplex<fptype>*), cacheToUse*sizeof(devcomplex<fptype>*), hipMemcpyHostToDevice); setForceIntegrals(); } __host__ fptype IncoherentSumPdf::normalise () const { recursiveSetNormalisation(1); // Not going to normalise efficiency, // so set normalisation factor to 1 so it doesn't get multiplied by zero. // Copy at this time to ensure that the SpecialCalculators, which need the efficiency, // don't get zeroes through multiplying by the normFactor. MEMCPY_TO_SYMBOL(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, hipMemcpyHostToDevice); int totalBins = _m12->numbins * _m13->numbins; if (!dalitzNormRange) { gooMalloc((void**) &dalitzNormRange, 6*sizeof(fptype)); fptype* host_norms = new fptype[6]; host_norms[0] = _m12->lowerlimit; host_norms[1] = _m12->upperlimit; host_norms[2] = _m12->numbins; host_norms[3] = _m13->lowerlimit; host_norms[4] = _m13->upperlimit; host_norms[5] = _m13->numbins; MEMCPY(dalitzNormRange, host_norms, 6*sizeof(fptype), hipMemcpyHostToDevice); delete[] host_norms; } // Check if efficiency changes force redoing the integrals. if (efficiency->parametersChanged()) { forceRedoIntegrals = true; efficiency->storeParameters(); } // Check for changed masses or forced integral redo. for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) { redoIntegral[i] = forceRedoIntegrals; if (!(decayInfo->resonances[i]->parametersChanged())) continue; redoIntegral[i] = true; decayInfo->resonances[i]->storeParameters(); } forceRedoIntegrals = false; thrust::constant_iterator<fptype*> arrayAddress(dalitzNormRange); thrust::counting_iterator<int> binIndex(0); // NB, SpecialIncoherentResonanceCalculator assumes that fit is unbinned! // And it needs to know the total event size, not just observables // for this particular PDF component. 
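// Cached resonance values are written with a stride equal to the number of resonances, one entry per (event, resonance) pair.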
thrust::constant_iterator<fptype*> dataArray(dev_event_array); thrust::constant_iterator<int> eventSize(totalEventSize); thrust::counting_iterator<int> eventIndex(0); for (int i = 0; i < decayInfo->resonances.size(); ++i) { if (redoIntegral[i]) { thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, dataArray, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), strided_range<DEVICE_VECTOR<devcomplex<fptype> >::iterator>(cachedResonances->begin() + i, cachedResonances->end(), decayInfo->resonances.size()).begin(), *(calculators[i])); fptype dummy = 0; static thrust::plus<fptype> cudaPlus; integrals[i] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, arrayAddress)), *(integrators[i]), dummy, cudaPlus); } } // End of time-consuming integrals and caching of BWs over Dalitz plot. fptype ret = 0; for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) { int param_i = parameters + resonanceOffset_incoherent + resonanceSize * i; fptype amplitude = host_params[host_indices[param_i]]; ret += amplitude * integrals[i]; } double binSizeFactor = 1; binSizeFactor *= ((_m12->upperlimit - _m12->lowerlimit) / _m12->numbins); binSizeFactor *= ((_m13->upperlimit - _m13->lowerlimit) / _m13->numbins); ret *= binSizeFactor; host_normalisation[parameters] = 1.0/ret; return (fptype) ret; } SpecialIncoherentIntegrator::SpecialIncoherentIntegrator (int pIdx, unsigned int ri) : resonance_i(ri) , parameters(pIdx) {} EXEC_TARGET fptype SpecialIncoherentIntegrator::operator () (thrust::tuple<int, fptype*> t) const { // Returns integral of specific BW over Dalitz plot, to be cached and // multiplied by rapidly-changing amplitude. // Bin index, base address [lower, upper, numbins] // Notice that this is basically MetricTaker::operator (binned) with the special-case knowledge // that event size is two, and that the function to call is getResonanceAmplitude. 
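// Convert the flat bin index into (m12, m13) bin centers, evaluate this resonance there, and weight its |amplitude|^2 by the efficiency.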
int globalBinNumber = thrust::get<0>(t); fptype lowerBoundM12 = thrust::get<1>(t)[0]; fptype upperBoundM12 = thrust::get<1>(t)[1]; int numBinsM12 = (int) FLOOR(thrust::get<1>(t)[2] + 0.5); int binNumberM12 = globalBinNumber % numBinsM12; fptype binCenterM12 = upperBoundM12 - lowerBoundM12; binCenterM12 /= numBinsM12; binCenterM12 *= (binNumberM12 + 0.5); binCenterM12 += lowerBoundM12; globalBinNumber /= numBinsM12; fptype lowerBoundM13 = thrust::get<1>(t)[3]; fptype upperBoundM13 = thrust::get<1>(t)[4]; int numBinsM13 = (int) FLOOR(thrust::get<1>(t)[5] + 0.5); fptype binCenterM13 = upperBoundM13 - lowerBoundM13; binCenterM13 /= numBinsM13; binCenterM13 *= (globalBinNumber + 0.5); binCenterM13 += lowerBoundM13; unsigned int* indices = paramIndices + parameters; fptype motherMass = functorConstants[indices[1] + 0]; fptype daug1Mass = functorConstants[indices[1] + 1]; fptype daug2Mass = functorConstants[indices[1] + 2]; fptype daug3Mass = functorConstants[indices[1] + 3]; if (!inDalitz(binCenterM12, binCenterM13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return 0; int parameter_i = parIndexFromResIndex_incoherent(resonance_i); // Find position of this resonance relative to TDDP start unsigned int functn_i = indices[parameter_i+2]; unsigned int params_i = indices[parameter_i+3]; fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - binCenterM12 - binCenterM13; devcomplex<fptype> ret = getResonanceAmplitude(binCenterM12, binCenterM13, m23, functn_i, params_i); unsigned int numResonances = indices[2]; fptype fakeEvt[10]; // Need room for many observables in case m12 or m13 were assigned a high index in an event-weighted fit. fakeEvt[indices[indices[0] + 2 + 0]] = binCenterM12; fakeEvt[indices[indices[0] + 2 + 1]] = binCenterM13; int effFunctionIdx = parIndexFromResIndex_incoherent(numResonances); fptype eff = callFunction(fakeEvt, indices[effFunctionIdx], indices[effFunctionIdx + 1]); return ret.abs2() * eff; } SpecialIncoherentResonanceCalculator::SpecialIncoherentResonanceCalculator (int pIdx, unsigned int res_idx) : resonance_i(res_idx) , parameters(pIdx) {} EXEC_TARGET devcomplex<fptype> SpecialIncoherentResonanceCalculator::operator () (thrust::tuple<int, fptype*, int> t) const { // Returns the BW, or other resonance function, for a specific resonance. // Is special because the value is expected to change slowly, so it's // useful to cache the result. int evtNum = thrust::get<0>(t); fptype* evt = thrust::get<1>(t) + (evtNum * thrust::get<2>(t)); unsigned int* indices = paramIndices + parameters; // Jump to TDDP position within parameters array fptype m12 = evt[indices[2 + indices[0]]]; fptype m13 = evt[indices[3 + indices[0]]]; fptype motherMass = functorConstants[indices[1] + 0]; fptype daug1Mass = functorConstants[indices[1] + 1]; fptype daug2Mass = functorConstants[indices[1] + 2]; fptype daug3Mass = functorConstants[indices[1] + 3]; if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return devcomplex<fptype>(0, 0); fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13; int parameter_i = parIndexFromResIndex_incoherent(resonance_i); // Find position of this resonance relative to TDDP start unsigned int functn_i = indices[parameter_i+2]; unsigned int params_i = indices[parameter_i+3]; devcomplex<fptype> ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i); return ret; }
ca70b0b21425b93474503015721a8312cf50a250.cu
#include "IncoherentSumPdf.hh" #include "devcomplex.hh" #include "ResonancePdf.hh" const int resonanceOffset_incoherent = 4; // Offset of the first resonance into the parameter index array. // Notice that this is different from the TddpPdf case because there's no time information. // In particular the offset consists of nP, constant index, number of resonances, and cache index. MEM_DEVICE devcomplex<fptype>* cResonanceValues[10]; EXEC_TARGET inline int parIndexFromResIndex_incoherent (int resIndex) { return resonanceOffset_incoherent + resIndex*resonanceSize; } EXEC_TARGET fptype device_incoherent (fptype* evt, fptype* p, unsigned int* indices) { // Calculates the incoherent sum over the resonances. int evtNum = (int) FLOOR(0.5 + evt[indices[4 + indices[0]]]); fptype ret = 0; unsigned int numResonances = indices[2]; unsigned int cacheToUse = indices[3]; for (int i = 0; i < numResonances; ++i) { int paramIndex = parIndexFromResIndex_incoherent(i); fptype amplitude = p[indices[paramIndex+0]]; devcomplex<fptype> matrixelement = cResonanceValues[cacheToUse][evtNum*numResonances + i]; ret += amplitude * matrixelement.abs2(); } // Multiply by efficiency int effFunctionIdx = parIndexFromResIndex_incoherent(numResonances); fptype eff = callFunction(evt, indices[effFunctionIdx], indices[effFunctionIdx + 1]); ret *= eff; return ret; } MEM_DEVICE device_function_ptr ptr_to_incoherent = device_incoherent; __host__ IncoherentSumPdf::IncoherentSumPdf (std::string n, Variable* m12, Variable* m13, Variable* eventNumber, DecayInfo* decay, GooPdf* eff) : GooPdf(0, n) , decayInfo(decay) , _m12(m12) , _m13(m13) , dalitzNormRange(0) , cachedResonances(0) , integrals(0) , forceRedoIntegrals(true) , totalEventSize(3) // Default 3 = m12, m13, evtNum. Will likely be overridden. , cacheToUse(0) , efficiency(eff) , integrators(0) , calculators(0) { registerObservable(_m12); registerObservable(_m13); registerObservable(eventNumber); std::vector<unsigned int> pindices; pindices.push_back(registerConstants(5)); fptype decayConstants[5]; decayConstants[0] = decayInfo->motherMass; decayConstants[1] = decayInfo->daug1Mass; decayConstants[2] = decayInfo->daug2Mass; decayConstants[3] = decayInfo->daug3Mass; decayConstants[4] = decayInfo->meson_radius; MEMCPY_TO_SYMBOL(functorConstants, decayConstants, 5*sizeof(fptype), cIndex*sizeof(fptype), cudaMemcpyHostToDevice); pindices.push_back(decayInfo->resonances.size()); static int cacheCount = 0; cacheToUse = cacheCount++; pindices.push_back(cacheToUse); for (std::vector<ResonancePdf*>::iterator res = decayInfo->resonances.begin(); res != decayInfo->resonances.end(); ++res) { pindices.push_back(registerParameter((*res)->amp_real)); pindices.push_back(registerParameter((*res)->amp_real)); // Not going to use amp_imag, but need a dummy index so the resonance size will be consistent. 
pindices.push_back((*res)->getFunctionIndex()); pindices.push_back((*res)->getParameterIndex()); (*res)->setConstantIndex(cIndex); components.push_back(*res); } pindices.push_back(efficiency->getFunctionIndex()); pindices.push_back(efficiency->getParameterIndex()); components.push_back(efficiency); GET_FUNCTION_ADDR(ptr_to_incoherent); initialise(pindices); redoIntegral = new bool[decayInfo->resonances.size()]; cachedMasses = new fptype[decayInfo->resonances.size()]; cachedWidths = new fptype[decayInfo->resonances.size()]; integrals = new double[decayInfo->resonances.size()]; for (int i = 0; i < decayInfo->resonances.size(); ++i) { redoIntegral[i] = true; cachedMasses[i] = -1; cachedWidths[i] = -1; integrals[i] = 0; } integrators = new SpecialIncoherentIntegrator*[decayInfo->resonances.size()]; calculators = new SpecialIncoherentResonanceCalculator*[decayInfo->resonances.size()]; for (int i = 0; i < decayInfo->resonances.size(); ++i) { integrators[i] = new SpecialIncoherentIntegrator(parameters, i); calculators[i] = new SpecialIncoherentResonanceCalculator(parameters, i); } addSpecialMask(PdfBase::ForceSeparateNorm); } __host__ void IncoherentSumPdf::setDataSize (unsigned int dataSize, unsigned int evtSize) { // Default 3 is m12, m13, evtNum totalEventSize = evtSize; assert(totalEventSize >= 3); if (cachedResonances) { delete cachedResonances; } numEntries = dataSize; cachedResonances = new DEVICE_VECTOR<devcomplex<fptype> >(dataSize*decayInfo->resonances.size()); void* dummy = thrust::raw_pointer_cast(cachedResonances->data()); MEMCPY_TO_SYMBOL(cResonanceValues, &dummy, sizeof(devcomplex<fptype>*), cacheToUse*sizeof(devcomplex<fptype>*), cudaMemcpyHostToDevice); setForceIntegrals(); } __host__ fptype IncoherentSumPdf::normalise () const { recursiveSetNormalisation(1); // Not going to normalise efficiency, // so set normalisation factor to 1 so it doesn't get multiplied by zero. // Copy at this time to ensure that the SpecialCalculators, which need the efficiency, // don't get zeroes through multiplying by the normFactor. MEMCPY_TO_SYMBOL(normalisationFactors, host_normalisation, totalParams*sizeof(fptype), 0, cudaMemcpyHostToDevice); int totalBins = _m12->numbins * _m13->numbins; if (!dalitzNormRange) { gooMalloc((void**) &dalitzNormRange, 6*sizeof(fptype)); fptype* host_norms = new fptype[6]; host_norms[0] = _m12->lowerlimit; host_norms[1] = _m12->upperlimit; host_norms[2] = _m12->numbins; host_norms[3] = _m13->lowerlimit; host_norms[4] = _m13->upperlimit; host_norms[5] = _m13->numbins; MEMCPY(dalitzNormRange, host_norms, 6*sizeof(fptype), cudaMemcpyHostToDevice); delete[] host_norms; } // Check if efficiency changes force redoing the integrals. if (efficiency->parametersChanged()) { forceRedoIntegrals = true; efficiency->storeParameters(); } // Check for changed masses or forced integral redo. for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) { redoIntegral[i] = forceRedoIntegrals; if (!(decayInfo->resonances[i]->parametersChanged())) continue; redoIntegral[i] = true; decayInfo->resonances[i]->storeParameters(); } forceRedoIntegrals = false; thrust::constant_iterator<fptype*> arrayAddress(dalitzNormRange); thrust::counting_iterator<int> binIndex(0); // NB, SpecialIncoherentResonanceCalculator assumes that fit is unbinned! // And it needs to know the total event size, not just observables // for this particular PDF component. 
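// Cached resonance values are written with a stride equal to the number of resonances, one entry per (event, resonance) pair.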
thrust::constant_iterator<fptype*> dataArray(dev_event_array); thrust::constant_iterator<int> eventSize(totalEventSize); thrust::counting_iterator<int> eventIndex(0); for (int i = 0; i < decayInfo->resonances.size(); ++i) { if (redoIntegral[i]) { thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(eventIndex, dataArray, eventSize)), thrust::make_zip_iterator(thrust::make_tuple(eventIndex + numEntries, arrayAddress, eventSize)), strided_range<DEVICE_VECTOR<devcomplex<fptype> >::iterator>(cachedResonances->begin() + i, cachedResonances->end(), decayInfo->resonances.size()).begin(), *(calculators[i])); fptype dummy = 0; static thrust::plus<fptype> cudaPlus; integrals[i] = thrust::transform_reduce(thrust::make_zip_iterator(thrust::make_tuple(binIndex, arrayAddress)), thrust::make_zip_iterator(thrust::make_tuple(binIndex + totalBins, arrayAddress)), *(integrators[i]), dummy, cudaPlus); } } // End of time-consuming integrals and caching of BWs over Dalitz plot. fptype ret = 0; for (unsigned int i = 0; i < decayInfo->resonances.size(); ++i) { int param_i = parameters + resonanceOffset_incoherent + resonanceSize * i; fptype amplitude = host_params[host_indices[param_i]]; ret += amplitude * integrals[i]; } double binSizeFactor = 1; binSizeFactor *= ((_m12->upperlimit - _m12->lowerlimit) / _m12->numbins); binSizeFactor *= ((_m13->upperlimit - _m13->lowerlimit) / _m13->numbins); ret *= binSizeFactor; host_normalisation[parameters] = 1.0/ret; return (fptype) ret; } SpecialIncoherentIntegrator::SpecialIncoherentIntegrator (int pIdx, unsigned int ri) : resonance_i(ri) , parameters(pIdx) {} EXEC_TARGET fptype SpecialIncoherentIntegrator::operator () (thrust::tuple<int, fptype*> t) const { // Returns integral of specific BW over Dalitz plot, to be cached and // multiplied by rapidly-changing amplitude. // Bin index, base address [lower, upper, numbins] // Notice that this is basically MetricTaker::operator (binned) with the special-case knowledge // that event size is two, and that the function to call is getResonanceAmplitude. 
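// Convert the flat bin index into (m12, m13) bin centers, evaluate this resonance there, and weight its |amplitude|^2 by the efficiency.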
int globalBinNumber = thrust::get<0>(t); fptype lowerBoundM12 = thrust::get<1>(t)[0]; fptype upperBoundM12 = thrust::get<1>(t)[1]; int numBinsM12 = (int) FLOOR(thrust::get<1>(t)[2] + 0.5); int binNumberM12 = globalBinNumber % numBinsM12; fptype binCenterM12 = upperBoundM12 - lowerBoundM12; binCenterM12 /= numBinsM12; binCenterM12 *= (binNumberM12 + 0.5); binCenterM12 += lowerBoundM12; globalBinNumber /= numBinsM12; fptype lowerBoundM13 = thrust::get<1>(t)[3]; fptype upperBoundM13 = thrust::get<1>(t)[4]; int numBinsM13 = (int) FLOOR(thrust::get<1>(t)[5] + 0.5); fptype binCenterM13 = upperBoundM13 - lowerBoundM13; binCenterM13 /= numBinsM13; binCenterM13 *= (globalBinNumber + 0.5); binCenterM13 += lowerBoundM13; unsigned int* indices = paramIndices + parameters; fptype motherMass = functorConstants[indices[1] + 0]; fptype daug1Mass = functorConstants[indices[1] + 1]; fptype daug2Mass = functorConstants[indices[1] + 2]; fptype daug3Mass = functorConstants[indices[1] + 3]; if (!inDalitz(binCenterM12, binCenterM13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return 0; int parameter_i = parIndexFromResIndex_incoherent(resonance_i); // Find position of this resonance relative to TDDP start unsigned int functn_i = indices[parameter_i+2]; unsigned int params_i = indices[parameter_i+3]; fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - binCenterM12 - binCenterM13; devcomplex<fptype> ret = getResonanceAmplitude(binCenterM12, binCenterM13, m23, functn_i, params_i); unsigned int numResonances = indices[2]; fptype fakeEvt[10]; // Need room for many observables in case m12 or m13 were assigned a high index in an event-weighted fit. fakeEvt[indices[indices[0] + 2 + 0]] = binCenterM12; fakeEvt[indices[indices[0] + 2 + 1]] = binCenterM13; int effFunctionIdx = parIndexFromResIndex_incoherent(numResonances); fptype eff = callFunction(fakeEvt, indices[effFunctionIdx], indices[effFunctionIdx + 1]); return ret.abs2() * eff; } SpecialIncoherentResonanceCalculator::SpecialIncoherentResonanceCalculator (int pIdx, unsigned int res_idx) : resonance_i(res_idx) , parameters(pIdx) {} EXEC_TARGET devcomplex<fptype> SpecialIncoherentResonanceCalculator::operator () (thrust::tuple<int, fptype*, int> t) const { // Returns the BW, or other resonance function, for a specific resonance. // Is special because the value is expected to change slowly, so it's // useful to cache the result. int evtNum = thrust::get<0>(t); fptype* evt = thrust::get<1>(t) + (evtNum * thrust::get<2>(t)); unsigned int* indices = paramIndices + parameters; // Jump to TDDP position within parameters array fptype m12 = evt[indices[2 + indices[0]]]; fptype m13 = evt[indices[3 + indices[0]]]; fptype motherMass = functorConstants[indices[1] + 0]; fptype daug1Mass = functorConstants[indices[1] + 1]; fptype daug2Mass = functorConstants[indices[1] + 2]; fptype daug3Mass = functorConstants[indices[1] + 3]; if (!inDalitz(m12, m13, motherMass, daug1Mass, daug2Mass, daug3Mass)) return devcomplex<fptype>(0, 0); fptype m23 = motherMass*motherMass + daug1Mass*daug1Mass + daug2Mass*daug2Mass + daug3Mass*daug3Mass - m12 - m13; int parameter_i = parIndexFromResIndex_incoherent(resonance_i); // Find position of this resonance relative to TDDP start unsigned int functn_i = indices[parameter_i+2]; unsigned int params_i = indices[parameter_i+3]; devcomplex<fptype> ret = getResonanceAmplitude(m12, m13, m23, functn_i, params_i); return ret; }
de484143254c96c7a0e1be3ecd4cb0a1e8ae8190.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <wb.h> #define TILE_WIDTH 8 #define wbCheck(stmt) do { \ hipError_t err = stmt; \ if (err != hipSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) // Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBColumns ) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if((Row < numARows) && (Col < numBColumns)) { float CValue = .0; for( int i = 0; i < numAColumns; ++i ) CValue += A[Row*numAColumns+i] * B[Col+i*numBColumns]; C[ Row*numBColumns + Col ] = CValue; } } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); size_t bytesA, bytesB, bytesC; //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; bytesA = numARows*numAColumns*sizeof(float); bytesB = numBRows*numBColumns*sizeof(float); bytesC = numCRows*numCColumns*sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) calloc( numCRows * numCColumns, sizeof(float) ); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(hipMalloc((void**)&deviceA, bytesA)); wbCheck(hipMalloc((void**)&deviceB, bytesB)); wbCheck(hipMalloc((void**)&deviceC, bytesC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(hipMemcpy(deviceA,hostA,bytesA,hipMemcpyHostToDevice)); wbCheck(hipMemcpy(deviceB,hostB,bytesB,hipMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); // n = numBRows, m = numARows //@@ Initialize the grid and block dimensions here dim3 dimGrid( (numBColumns-1)/TILE_WIDTH+1, (numARows-1)/TILE_WIDTH+1, 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here hipLaunchKernelGGL(( matrixMultiply), dim3(dimGrid), dim3(dimBlock) , 0, 0, deviceA, deviceB, deviceC , numARows, numAColumns, numBColumns ); hipDeviceSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(hipMemcpy( hostC, deviceC, bytesC, hipMemcpyDeviceToHost )); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, 
numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
de484143254c96c7a0e1be3ecd4cb0a1e8ae8190.cu
#include <wb.h> #define TILE_WIDTH 8 #define wbCheck(stmt) do { \ cudaError_t err = stmt; \ if (err != cudaSuccess) { \ wbLog(ERROR, "Failed to run stmt ", #stmt); \ return -1; \ } \ } while(0) // Compute C = A * B __global__ void matrixMultiply(float * A, float * B, float * C, int numARows, int numAColumns, int numBColumns ) { //@@ Insert code to implement matrix multiplication here int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if((Row < numARows) && (Col < numBColumns)) { float CValue = .0; for( int i = 0; i < numAColumns; ++i ) CValue += A[Row*numAColumns+i] * B[Col+i*numBColumns]; C[ Row*numBColumns + Col ] = CValue; } } int main(int argc, char ** argv) { wbArg_t args; float * hostA; // The A matrix float * hostB; // The B matrix float * hostC; // The output C matrix float * deviceA; float * deviceB; float * deviceC; int numARows; // number of rows in the matrix A int numAColumns; // number of columns in the matrix A int numBRows; // number of rows in the matrix B int numBColumns; // number of columns in the matrix B int numCRows; // number of rows in the matrix C (you have to set this) int numCColumns; // number of columns in the matrix C (you have to set this) args = wbArg_read(argc, argv); wbTime_start(Generic, "Importing data and creating memory on host"); hostA = (float *) wbImport(wbArg_getInputFile(args, 0), &numARows, &numAColumns); hostB = (float *) wbImport(wbArg_getInputFile(args, 1), &numBRows, &numBColumns); size_t bytesA, bytesB, bytesC; //@@ Set numCRows and numCColumns numCRows = numARows; numCColumns = numBColumns; bytesA = numARows*numAColumns*sizeof(float); bytesB = numBRows*numBColumns*sizeof(float); bytesC = numCRows*numCColumns*sizeof(float); //@@ Allocate the hostC matrix hostC = (float*) calloc( numCRows * numCColumns, sizeof(float) ); wbTime_stop(Generic, "Importing data and creating memory on host"); wbLog(TRACE, "The dimensions of A are ", numARows, " x ", numAColumns); wbLog(TRACE, "The dimensions of B are ", numBRows, " x ", numBColumns); wbTime_start(GPU, "Allocating GPU memory."); //@@ Allocate GPU memory here wbCheck(cudaMalloc((void**)&deviceA, bytesA)); wbCheck(cudaMalloc((void**)&deviceB, bytesB)); wbCheck(cudaMalloc((void**)&deviceC, bytesC)); wbTime_stop(GPU, "Allocating GPU memory."); wbTime_start(GPU, "Copying input memory to the GPU."); //@@ Copy memory to the GPU here wbCheck(cudaMemcpy(deviceA,hostA,bytesA,cudaMemcpyHostToDevice)); wbCheck(cudaMemcpy(deviceB,hostB,bytesB,cudaMemcpyHostToDevice)); wbTime_stop(GPU, "Copying input memory to the GPU."); // n = numBRows, m = numARows //@@ Initialize the grid and block dimensions here dim3 dimGrid( (numBColumns-1)/TILE_WIDTH+1, (numARows-1)/TILE_WIDTH+1, 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ); wbTime_start(Compute, "Performing CUDA computation"); //@@ Launch the GPU Kernel here matrixMultiply<<< dimGrid, dimBlock >>>( deviceA, deviceB, deviceC , numARows, numAColumns, numBColumns ); cudaThreadSynchronize(); wbTime_stop(Compute, "Performing CUDA computation"); wbTime_start(Copy, "Copying output memory to the CPU"); //@@ Copy the GPU memory back to the CPU here wbCheck(cudaMemcpy( hostC, deviceC, bytesC, cudaMemcpyDeviceToHost )); wbTime_stop(Copy, "Copying output memory to the CPU"); wbTime_start(GPU, "Freeing GPU Memory"); //@@ Free the GPU memory here wbTime_stop(GPU, "Freeing GPU Memory"); wbSolution(args, hostC, numCRows, numCColumns); free(hostA); free(hostB); free(hostC); return 0; }
62699295f4ae0ecdcbcaba50ab159f4da6e86036.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> #define NUM_ELEMENTS 262144 // MIN = 512, MAX = 33553920 #define MAX_THREADS 512 #define LAST_THREAD (MAX_THREADS - 1) #define MAX_THREAD_DIVISOR ((float)(1.0 / MAX_THREADS)) // includes, kernels #include <compact_stream_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void compact_stream(void); extern "C" void compact_stream_gold(float *reference, float *idata, unsigned int *len); int main( int argc, char** argv) { compact_stream(); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void compact_stream(void) { unsigned int num_elements = NUM_ELEMENTS; unsigned int cpuTimer, gpuTimer, kernelTimer; cutilCheckError(cutCreateTimer(&cpuTimer)); cutilCheckError(cutCreateTimer(&gpuTimer)); cutilCheckError(cutCreateTimer(&kernelTimer)); const unsigned int num_blocks = (num_elements + MAX_THREADS - 1) / MAX_THREADS; const unsigned int num_blocks_blocks = (num_blocks + MAX_THREADS - 1) / MAX_THREADS; const unsigned int mem_size = sizeof(float) * num_elements; const unsigned int int_size = sizeof(int) * MAX_THREADS * num_blocks; const unsigned int sum_size = sizeof(int) * MAX_THREADS * num_blocks_blocks; // allocate host memory to store the input data float *h_data = (float*)malloc(mem_size); // initialize the input data on the host to be integer values // between 0 and 1000, both positive and negative float rand_number; for (unsigned int i = 0; i < num_elements; ++i) { rand_number = rand()/(float)RAND_MAX; if(rand_number > 0.5) h_data[i] = floorf(1000 * (rand() / (float)RAND_MAX)); else h_data[i] = -floorf(1000 * (rand() / (float)RAND_MAX)); } // compute reference solution cutStartTimer(cpuTimer); float *reference = (float*)malloc(mem_size); unsigned int reference_length = num_elements; compact_stream_gold(reference, h_data, &reference_length); cutStopTimer(cpuTimer); // compute gpu solution cutStartTimer(gpuTimer); // allocate device memory input/output arrays and processing arrays float *d_input, *d_output; cutilSafeCall(hipMalloc((void**)&d_input, mem_size)); cutilSafeCall(hipMalloc((void**)&d_output, mem_size)); int *d_flags, *d_scans, *d_sums, *d_increments; cutilSafeCall(hipMalloc((void**)&d_flags, int_size)); cutilSafeCall(hipMalloc((void**)&d_scans, int_size)); cutilSafeCall(hipMalloc((void**)&d_sums, sum_size)); cutilSafeCall(hipMalloc((void**)&d_increments, sum_size)); // copy host memory to device input array and initialize others cutilSafeCall(hipMemcpy(d_input, h_data, mem_size, hipMemcpyHostToDevice)); cutilSafeCall(hipMemset(d_output, 0, mem_size)); cutilSafeCall(hipMemset(d_flags, 0, int_size)); cutilSafeCall(hipMemset(d_scans, 0, int_size)); cutilSafeCall(hipMemset(d_sums, 0, sum_size)); cutilSafeCall(hipMemset(d_increments, 0, sum_size)); dim3 grid(num_blocks, 1, 1); dim3 grid_sums(num_blocks_blocks, 1, 1); dim3 threads(MAX_THREADS, 1, 1); // make sure there are no CUDA errors before we start cutilCheckMsg("Kernel execution failed"); // execute the kernels cutStartTimer(kernelTimer); hipLaunchKernelGGL(( flag_and_scan), dim3(grid), dim3(threads) , 0, 0, d_input, d_scans, d_flags, d_sums); 
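// Each pass is synchronized before the next kernel consumes its results.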
hipDeviceSynchronize(); hipLaunchKernelGGL(( scan_sums), dim3(grid_sums), dim3(threads) , 0, 0, d_sums, d_increments, num_blocks); hipDeviceSynchronize(); hipLaunchKernelGGL(( add_increments), dim3(grid), dim3(threads) , 0, 0, d_scans, d_increments); hipDeviceSynchronize(); hipLaunchKernelGGL(( compact_stream), dim3(grid), dim3(threads) , 0, 0, d_output, d_input, d_flags, d_scans); hipDeviceSynchronize(); cutStopTimer(kernelTimer); // check for any errors cutilCheckMsg("Kernel execution failed"); // copy result from device to host cutilSafeCall(hipMemcpy(h_data, d_output, mem_size, hipMemcpyDeviceToHost)); cutStopTimer(gpuTimer); // perform comparison and print statistics float epsilon = 0.0f; unsigned int result_regtest = cutComparefe(reference, h_data, reference_length, epsilon); printf("%s: Test %s\n\n", "compact_stream", (1 == result_regtest) ? "PASSED" : "FAILED"); printf("CPU time: %f ms\n", cutGetTimerValue(cpuTimer)); printf("GPU time: %f ms\n", cutGetTimerValue(gpuTimer)); printf("Kernel time: %f ms\n", cutGetTimerValue(kernelTimer)); printf("Overhead: %f ms\n", (cutGetTimerValue(gpuTimer) - cutGetTimerValue(kernelTimer))); cutResetTimer(cpuTimer); cutResetTimer(gpuTimer); cutResetTimer(kernelTimer); // cleanup memory free(h_data); free(reference); cutilSafeCall(hipFree(d_input)); cutilSafeCall(hipFree(d_output)); cutilSafeCall(hipFree(d_flags)); cutilSafeCall(hipFree(d_scans)); cutilSafeCall(hipFree(d_sums)); cutilSafeCall(hipFree(d_increments)); cutilCheckError(cutDeleteTimer(cpuTimer)); cutilCheckError(cutDeleteTimer(gpuTimer)); cutilCheckError(cutDeleteTimer(kernelTimer)); hipDeviceReset(); }
62699295f4ae0ecdcbcaba50ab159f4da6e86036.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> // includes, project #include <cutil_inline.h> #define NUM_ELEMENTS 262144 // MIN = 512, MAX = 33553920 #define MAX_THREADS 512 #define LAST_THREAD (MAX_THREADS - 1) #define MAX_THREAD_DIVISOR ((float)(1.0 / MAX_THREADS)) // includes, kernels #include <compact_stream_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void compact_stream(void); extern "C" void compact_stream_gold(float *reference, float *idata, unsigned int *len); int main( int argc, char** argv) { compact_stream(); cutilExit(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a scan test for CUDA //////////////////////////////////////////////////////////////////////////////// void compact_stream(void) { unsigned int num_elements = NUM_ELEMENTS; unsigned int cpuTimer, gpuTimer, kernelTimer; cutilCheckError(cutCreateTimer(&cpuTimer)); cutilCheckError(cutCreateTimer(&gpuTimer)); cutilCheckError(cutCreateTimer(&kernelTimer)); const unsigned int num_blocks = (num_elements + MAX_THREADS - 1) / MAX_THREADS; const unsigned int num_blocks_blocks = (num_blocks + MAX_THREADS - 1) / MAX_THREADS; const unsigned int mem_size = sizeof(float) * num_elements; const unsigned int int_size = sizeof(int) * MAX_THREADS * num_blocks; const unsigned int sum_size = sizeof(int) * MAX_THREADS * num_blocks_blocks; // allocate host memory to store the input data float *h_data = (float*)malloc(mem_size); // initialize the input data on the host to be integer values // between 0 and 1000, both positive and negative float rand_number; for (unsigned int i = 0; i < num_elements; ++i) { rand_number = rand()/(float)RAND_MAX; if(rand_number > 0.5) h_data[i] = floorf(1000 * (rand() / (float)RAND_MAX)); else h_data[i] = -floorf(1000 * (rand() / (float)RAND_MAX)); } // compute reference solution cutStartTimer(cpuTimer); float *reference = (float*)malloc(mem_size); unsigned int reference_length = num_elements; compact_stream_gold(reference, h_data, &reference_length); cutStopTimer(cpuTimer); // compute gpu solution cutStartTimer(gpuTimer); // allocate device memory input/output arrays and processing arrays float *d_input, *d_output; cutilSafeCall(cudaMalloc((void**)&d_input, mem_size)); cutilSafeCall(cudaMalloc((void**)&d_output, mem_size)); int *d_flags, *d_scans, *d_sums, *d_increments; cutilSafeCall(cudaMalloc((void**)&d_flags, int_size)); cutilSafeCall(cudaMalloc((void**)&d_scans, int_size)); cutilSafeCall(cudaMalloc((void**)&d_sums, sum_size)); cutilSafeCall(cudaMalloc((void**)&d_increments, sum_size)); // copy host memory to device input array and initialize others cutilSafeCall(cudaMemcpy(d_input, h_data, mem_size, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemset(d_output, 0, mem_size)); cutilSafeCall(cudaMemset(d_flags, 0, int_size)); cutilSafeCall(cudaMemset(d_scans, 0, int_size)); cutilSafeCall(cudaMemset(d_sums, 0, sum_size)); cutilSafeCall(cudaMemset(d_increments, 0, sum_size)); dim3 grid(num_blocks, 1, 1); dim3 grid_sums(num_blocks_blocks, 1, 1); dim3 threads(MAX_THREADS, 1, 1); // make sure there are no CUDA errors before we start cutilCheckMsg("Kernel execution failed"); // execute the kernels cutStartTimer(kernelTimer); flag_and_scan<<< grid, threads >>>(d_input, d_scans, d_flags, d_sums); cudaThreadSynchronize(); scan_sums<<< grid_sums, threads >>>(d_sums, d_increments, num_blocks); cudaThreadSynchronize(); 
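// Combine the scanned block sums with the per-block scans before compacting the stream.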
add_increments<<< grid, threads >>>(d_scans, d_increments); cudaThreadSynchronize(); compact_stream<<< grid, threads >>>(d_output, d_input, d_flags, d_scans); cudaThreadSynchronize(); cutStopTimer(kernelTimer); // check for any errors cutilCheckMsg("Kernel execution failed"); // copy result from device to host cutilSafeCall(cudaMemcpy(h_data, d_output, mem_size, cudaMemcpyDeviceToHost)); cutStopTimer(gpuTimer); // perform comparison and print statistics float epsilon = 0.0f; unsigned int result_regtest = cutComparefe(reference, h_data, reference_length, epsilon); printf("%s: Test %s\n\n", "compact_stream", (1 == result_regtest) ? "PASSED" : "FAILED"); printf("CPU time: %f ms\n", cutGetTimerValue(cpuTimer)); printf("GPU time: %f ms\n", cutGetTimerValue(gpuTimer)); printf("Kernel time: %f ms\n", cutGetTimerValue(kernelTimer)); printf("Overhead: %f ms\n", (cutGetTimerValue(gpuTimer) - cutGetTimerValue(kernelTimer))); cutResetTimer(cpuTimer); cutResetTimer(gpuTimer); cutResetTimer(kernelTimer); // cleanup memory free(h_data); free(reference); cutilSafeCall(cudaFree(d_input)); cutilSafeCall(cudaFree(d_output)); cutilSafeCall(cudaFree(d_flags)); cutilSafeCall(cudaFree(d_scans)); cutilSafeCall(cudaFree(d_sums)); cutilSafeCall(cudaFree(d_increments)); cutilCheckError(cutDeleteTimer(cpuTimer)); cutilCheckError(cutDeleteTimer(gpuTimer)); cutilCheckError(cutDeleteTimer(kernelTimer)); cudaThreadExit(); }
c890f7207158cc6879ca1d7cd2072ef8b2ec9997.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "subgraph_generator.cuh" #include "graph.cuh" #include "subgraph.cuh" #include "gpu_error_check.cuh" const unsigned int NUM_THREADS = 64; const unsigned int THRESHOLD_THREAD = 50000; __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, bool *label1, bool *label2, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ activeNodesLabeling[id] = label1[id] || label2[id]; // label1 is always zero in sync //activeNodesLabeling[id] = label[id]; //activeNodesLabeling[id] = 1; activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, float *delta, unsigned int numNodes, float acc) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ if(delta[id] > acc) { activeNodesLabeling[id] = 1; } else { activeNodesLabeling[id] = 0; } activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } __global__ void makeQueue(unsigned int *activeNodes, unsigned int *activeNodesLabeling, unsigned int *prefixLabeling, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes && activeNodesLabeling[id] == 1){ activeNodes[prefixLabeling[id]] = id; } } __global__ void makeActiveNodesPointer(u_int64_t *activeNodesPointer, unsigned int *activeNodesLabeling, unsigned int *prefixLabeling, u_int64_t *prefixSumDegrees, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes && activeNodesLabeling[id] == 1){ activeNodesPointer[prefixLabeling[id]] = prefixSumDegrees[id]; } } // pthread template <class E> void dynamic(unsigned int tId, unsigned int numThreads, unsigned int numActiveNodes, unsigned int *activeNodes, u_int64_t *outDegree, u_int64_t *activeNodesPointer, u_int64_t *nodePointer, E *activeEdgeList, E *edgeList) { unsigned int chunkSize = ceil(numActiveNodes / numThreads); unsigned int left, right; left = tId * chunkSize; right = min(left+chunkSize, numActiveNodes); unsigned int thisNode; u_int64_t thisDegree; u_int64_t fromHere; u_int64_t fromThere; for(unsigned int i=left; i<right; i++) { thisNode = activeNodes[i]; thisDegree = outDegree[thisNode]; fromHere = activeNodesPointer[i]; fromThere = nodePointer[thisNode]; for(u_int64_t j=0; j<thisDegree; j++) { activeEdgeList[fromHere+j] = edgeList[fromThere+j]; } } } template <class E> SubgraphGenerator<E>::SubgraphGenerator(Graph<E> &graph) { gpuErrorcheck(hipHostMalloc(&activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipHostMalloc(&activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(hipHostMalloc(&prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipHostMalloc(&prefixSumDegrees, (graph.num_nodes+1) * sizeof(u_int64_t))); gpuErrorcheck(hipMalloc(&d_activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(hipMalloc(&d_prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_prefixSumDegrees , (graph.num_nodes+1) * sizeof(u_int64_t))); } template <class E> SubgraphGenerator<E>::SubgraphGenerator(GraphPR<E> &graph) { gpuErrorcheck(hipHostMalloc(&activeNodesLabeling, 
graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipHostMalloc(&activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(hipHostMalloc(&prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipHostMalloc(&prefixSumDegrees, (graph.num_nodes+1) * sizeof(u_int64_t))); gpuErrorcheck(hipMalloc(&d_activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(hipMalloc(&d_prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(hipMalloc(&d_prefixSumDegrees , (graph.num_nodes+1) * sizeof(u_int64_t))); } template <class E> void SubgraphGenerator<E>::generate(Graph<E> &graph, Subgraph<E> &subgraph) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); hipLaunchKernelGGL(( prePrefix), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, graph.d_label1, graph.d_label2, graph.num_nodes); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( makeQueue), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), hipMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); hipLaunchKernelGGL(( makeActiveNodesPointer), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), hipMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(hipMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), hipMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, 
subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int t=0; t<numThreads; t++) runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU Time = " << elapsed_seconds_dync.count() << std::endl; } template <class E> void SubgraphGenerator<E>::generate(GraphPR<E> &graph, Subgraph<E> &subgraph, float acc) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); hipLaunchKernelGGL(( prePrefix), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, graph.d_delta, graph.num_nodes, acc); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( makeQueue), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), hipMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); hipLaunchKernelGGL(( makeActiveNodesPointer), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), hipMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(hipMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), hipMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int t=0; t<numThreads; t++) 
runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU Time = " << elapsed_seconds_dync.count() << std::endl; } __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, int *numWalker1, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ if(numWalker1[id] > 0) { activeNodesLabeling[id] = 1; } else { activeNodesLabeling[id] = 0; } activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } template <class E> void SubgraphGenerator<E>::generate(GraphPR<E> &graph, Subgraph<E> &subgraph, int *numWalker1) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); hipLaunchKernelGGL(( prePrefix), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, numWalker1, graph.num_nodes); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); hipLaunchKernelGGL(( makeQueue), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), hipMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); hipLaunchKernelGGL(( makeActiveNodesPointer), dim3(graph.num_nodes/512+1), dim3(512), 0, 0, subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), hipMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(hipMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), hipMemcpyHostToDevice)); gpuErrorcheck(hipMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), hipMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { 
runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int t=0; t<numThreads; t++) runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU Time = " << elapsed_seconds_dync.count() << std::endl; } template class SubgraphGenerator<OutEdge>; template class SubgraphGenerator<OutEdgeWeighted>;
c890f7207158cc6879ca1d7cd2072ef8b2ec9997.cu
#include "subgraph_generator.cuh" #include "graph.cuh" #include "subgraph.cuh" #include "gpu_error_check.cuh" const unsigned int NUM_THREADS = 64; const unsigned int THRESHOLD_THREAD = 50000; __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, bool *label1, bool *label2, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ activeNodesLabeling[id] = label1[id] || label2[id]; // label1 is always zero in sync //activeNodesLabeling[id] = label[id]; //activeNodesLabeling[id] = 1; activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, float *delta, unsigned int numNodes, float acc) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ if(delta[id] > acc) { activeNodesLabeling[id] = 1; } else { activeNodesLabeling[id] = 0; } activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } __global__ void makeQueue(unsigned int *activeNodes, unsigned int *activeNodesLabeling, unsigned int *prefixLabeling, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes && activeNodesLabeling[id] == 1){ activeNodes[prefixLabeling[id]] = id; } } __global__ void makeActiveNodesPointer(u_int64_t *activeNodesPointer, unsigned int *activeNodesLabeling, unsigned int *prefixLabeling, u_int64_t *prefixSumDegrees, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes && activeNodesLabeling[id] == 1){ activeNodesPointer[prefixLabeling[id]] = prefixSumDegrees[id]; } } // pthread template <class E> void dynamic(unsigned int tId, unsigned int numThreads, unsigned int numActiveNodes, unsigned int *activeNodes, u_int64_t *outDegree, u_int64_t *activeNodesPointer, u_int64_t *nodePointer, E *activeEdgeList, E *edgeList) { unsigned int chunkSize = ceil(numActiveNodes / numThreads); unsigned int left, right; left = tId * chunkSize; right = min(left+chunkSize, numActiveNodes); unsigned int thisNode; u_int64_t thisDegree; u_int64_t fromHere; u_int64_t fromThere; for(unsigned int i=left; i<right; i++) { thisNode = activeNodes[i]; thisDegree = outDegree[thisNode]; fromHere = activeNodesPointer[i]; fromThere = nodePointer[thisNode]; for(u_int64_t j=0; j<thisDegree; j++) { activeEdgeList[fromHere+j] = edgeList[fromThere+j]; } } } template <class E> SubgraphGenerator<E>::SubgraphGenerator(Graph<E> &graph) { gpuErrorcheck(cudaMallocHost(&activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMallocHost(&activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(cudaMallocHost(&prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMallocHost(&prefixSumDegrees, (graph.num_nodes+1) * sizeof(u_int64_t))); gpuErrorcheck(cudaMalloc(&d_activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(cudaMalloc(&d_prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_prefixSumDegrees , (graph.num_nodes+1) * sizeof(u_int64_t))); } template <class E> SubgraphGenerator<E>::SubgraphGenerator(GraphPR<E> &graph) { gpuErrorcheck(cudaMallocHost(&activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); 
gpuErrorcheck(cudaMallocHost(&activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(cudaMallocHost(&prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMallocHost(&prefixSumDegrees, (graph.num_nodes+1) * sizeof(u_int64_t))); gpuErrorcheck(cudaMalloc(&d_activeNodesLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_activeNodesDegree, graph.num_nodes * sizeof(u_int64_t))); gpuErrorcheck(cudaMalloc(&d_prefixLabeling, graph.num_nodes * sizeof(unsigned int))); gpuErrorcheck(cudaMalloc(&d_prefixSumDegrees , (graph.num_nodes+1) * sizeof(u_int64_t))); } template <class E> void SubgraphGenerator<E>::generate(Graph<E> &graph, Subgraph<E> &subgraph) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); prePrefix<<<graph.num_nodes/512+1, 512>>>(d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, graph.d_label1, graph.d_label2, graph.num_nodes); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); makeQueue<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), cudaMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); makeActiveNodesPointer<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(cudaMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int 
t=0; t<numThreads; t++) runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU Time = " << elapsed_seconds_dync.count() << std::endl; } template <class E> void SubgraphGenerator<E>::generate(GraphPR<E> &graph, Subgraph<E> &subgraph, float acc) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); prePrefix<<<graph.num_nodes/512+1, 512>>>(d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, graph.d_delta, graph.num_nodes, acc); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); makeQueue<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), cudaMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); makeActiveNodesPointer<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(cudaMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int t=0; t<numThreads; t++) runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU 
Time = " << elapsed_seconds_dync.count() << std::endl; } __global__ void prePrefix(unsigned int *activeNodesLabeling, u_int64_t *activeNodesDegree, u_int64_t *outDegree, int *numWalker1, unsigned int numNodes) { unsigned int id = blockDim.x * blockIdx.x + threadIdx.x; if(id < numNodes){ if(numWalker1[id] > 0) { activeNodesLabeling[id] = 1; } else { activeNodesLabeling[id] = 0; } activeNodesDegree[id] = 0; if(activeNodesLabeling[id] == 1) activeNodesDegree[id] = outDegree[id]; } } template <class E> void SubgraphGenerator<E>::generate(GraphPR<E> &graph, Subgraph<E> &subgraph, int *numWalker1) { //std::chrono::time_point<std::chrono::system_clock> startDynG, finishDynG; //startDynG = std::chrono::system_clock::now(); prePrefix<<<graph.num_nodes/512+1, 512>>>(d_activeNodesLabeling, d_activeNodesDegree, graph.d_outDegree, numWalker1, graph.num_nodes); thrust::device_ptr<unsigned int> ptr_labeling(d_activeNodesLabeling); thrust::device_ptr<unsigned int> ptr_labeling_prefixsum(d_prefixLabeling); subgraph.numActiveNodes = thrust::reduce(ptr_labeling, ptr_labeling + graph.num_nodes); //cout << "Number of Active Nodes = " << subgraph.numActiveNodes << endl; thrust::exclusive_scan(ptr_labeling, ptr_labeling + graph.num_nodes, ptr_labeling_prefixsum); makeQueue<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodes, d_activeNodesLabeling, d_prefixLabeling, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodes, subgraph.d_activeNodes, subgraph.numActiveNodes*sizeof(unsigned int), cudaMemcpyDeviceToHost)); thrust::device_ptr<u_int64_t> ptr_degrees(d_activeNodesDegree); thrust::device_ptr<u_int64_t> ptr_degrees_prefixsum(d_prefixSumDegrees); thrust::exclusive_scan(ptr_degrees, ptr_degrees + graph.num_nodes, ptr_degrees_prefixsum); makeActiveNodesPointer<<<graph.num_nodes/512+1, 512>>>(subgraph.d_activeNodesPointer, d_activeNodesLabeling, d_prefixLabeling, d_prefixSumDegrees, graph.num_nodes); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, subgraph.numActiveNodes*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); u_int64_t numActiveEdges = 0; if(subgraph.numActiveNodes>0) numActiveEdges = subgraph.activeNodesPointer[subgraph.numActiveNodes-1] + graph.outDegree[subgraph.activeNodes[subgraph.numActiveNodes-1]]; u_int64_t last = numActiveEdges; gpuErrorcheck(cudaMemcpy(subgraph.d_activeNodesPointer+subgraph.numActiveNodes, &last, sizeof(u_int64_t), cudaMemcpyHostToDevice)); gpuErrorcheck(cudaMemcpy(subgraph.activeNodesPointer, subgraph.d_activeNodesPointer, (subgraph.numActiveNodes+1)*sizeof(u_int64_t), cudaMemcpyDeviceToHost)); //finishDynG = std::chrono::system_clock::now(); //std::chrono::duration<double> elapsed_seconds_dyng = finishDynG-startDynG; //std::time_t finish_time_dyng = std::chrono::system_clock::to_time_t(finishDynG); //std::cout << "Dynamic GPU Time = " << elapsed_seconds_dyng.count() << std::endl; //td::chrono::time_point<std::chrono::system_clock> startDynC, finishDynC; //startDynC = std::chrono::system_clock::now(); unsigned int numThreads = NUM_THREADS; if(subgraph.numActiveNodes < THRESHOLD_THREAD) numThreads = 1; thread runThreads[numThreads]; for(unsigned int t=0; t<numThreads; t++) { runThreads[t] = thread(dynamic<E>, t, numThreads, subgraph.numActiveNodes, subgraph.activeNodes, graph.outDegree, subgraph.activeNodesPointer, graph.nodePointer, subgraph.activeEdgeList, graph.edgeList); } for(unsigned int t=0; t<numThreads; t++) runThreads[t].join(); //finishDynC = std::chrono::system_clock::now(); //std::chrono::duration<double> 
elapsed_seconds_dync = finishDynC-startDynC; //std::time_t finish_time_dync = std::chrono::system_clock::to_time_t(finishDynC); //std::cout << "Dynamic CPU Time = " << elapsed_seconds_dync.count() << std::endl; } template class SubgraphGenerator<OutEdge>; template class SubgraphGenerator<OutEdgeWeighted>;
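// A minimal sketch (illustrative only, not part of the file above; chunk_bounds is a
// name invented for this example): the chunk split in dynamic() computes
// ceil(numActiveNodes / numThreads) on an already-truncated unsigned quotient, so if no
// floating-point cast was intended, the trailing active nodes can be left uncopied when
// numActiveNodes is not a multiple of numThreads. Rounding up in integer arithmetic
// avoids that:
static inline void chunk_bounds(unsigned int tId, unsigned int numThreads,
                                unsigned int numActiveNodes,
                                unsigned int *left, unsigned int *right) {
    unsigned int chunkSize = (numActiveNodes + numThreads - 1) / numThreads; // ceiling division
    *left  = tId * chunkSize;
    *right = *left + chunkSize;
    if (*right > numActiveNodes) *right = numActiveNodes;   // clamp the last chunk
}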
7f25254f41c33797197b4cae0f68ee099fcb69ab.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
//cuda include
#include <hip/hip_runtime.h>

// Swap `length` bytes between two device buffers through a temporary
// buffer obtained from the device heap.
__device__ void Gswap(void *from, void *to, int length){
    void *tmp = malloc(length);
    if (tmp == NULL) return;      // device heap exhausted: leave both buffers untouched
    memcpy(tmp, to, length);
    memcpy(to, from, length);
    memcpy(from, tmp, length);
    free(tmp);                    // release the temporary so the device heap is not leaked
}
7f25254f41c33797197b4cae0f68ee099fcb69ab.cu
#include <stdio.h>
#include <stdlib.h>
//cuda include
#include <cuda.h>

// Swap `length` bytes between two device buffers through a temporary
// buffer obtained from the device heap.
__device__ void Gswap(void *from, void *to, int length){
    void *tmp = malloc(length);
    if (tmp == NULL) return;      // device heap exhausted: leave both buffers untouched
    memcpy(tmp, to, length);
    memcpy(to, from, length);
    memcpy(from, tmp, length);
    free(tmp);                    // release the temporary so the device heap is not leaked
}
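// Illustrative usage sketch (the kernel name and launch shape below are assumptions made
// for this example, not part of the file above): Gswap can be called from any kernel in
// the same translation unit to swap elements byte-wise through the device heap.
__global__ void swap_elements(int *a, int *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        Gswap(&a[i], &b[i], sizeof(int));   // swap a[i] and b[i] in place
}
// Launch example: swap_elements<<<(n + 255) / 256, 256>>>(d_a, d_b, n);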
0b868b8e12b205a64ad227aaeab0ddd984b783d4.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>

// Each thread writes its global index into the corresponding vector slot.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < vectorsize) vector[id] = id;
}

#define BLOCKSIZE 1024

int main(int nn, char *str[]) {
    if (nn < 2) {
        fprintf(stderr, "usage: %s N\n", str[0]);
        return 1;
    }
    unsigned N = atoi(str[1]);
    unsigned *vector, *hvector;
    hipMalloc(&vector, N * sizeof(unsigned));
    hvector = (unsigned *)malloc(N * sizeof(unsigned));

    unsigned nblocks = ceil((float)N / BLOCKSIZE);
    printf("nblocks = %u\n", nblocks);

    hipLaunchKernelGGL(dkernel, dim3(nblocks), dim3(BLOCKSIZE), 0, 0, vector, N);
    hipMemcpy(hvector, vector, N * sizeof(unsigned), hipMemcpyDeviceToHost);

    for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4u ", hvector[ii]);
        if (ii % 1000 == 0) printf("\n");
    }

    hipFree(vector);
    free(hvector);
    return 0;
}
0b868b8e12b205a64ad227aaeab0ddd984b783d4.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>

// Each thread writes its global index into the corresponding vector slot.
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < vectorsize) vector[id] = id;
}

#define BLOCKSIZE 1024

int main(int nn, char *str[]) {
    if (nn < 2) {
        fprintf(stderr, "usage: %s N\n", str[0]);
        return 1;
    }
    unsigned N = atoi(str[1]);
    unsigned *vector, *hvector;
    cudaMalloc(&vector, N * sizeof(unsigned));
    hvector = (unsigned *)malloc(N * sizeof(unsigned));

    unsigned nblocks = ceil((float)N / BLOCKSIZE);
    printf("nblocks = %u\n", nblocks);

    dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
    cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);

    for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4u ", hvector[ii]);
        if (ii % 1000 == 0) printf("\n");
    }

    cudaFree(vector);
    free(hvector);
    return 0;
}
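// Illustrative sketch (CUDA_CHECK is a name invented for this example): the program
// above ignores the return codes of cudaMalloc, the kernel launch, and cudaMemcpy, so a
// failed allocation can silently print uninitialized host memory. A small checking macro
// surfaces such failures:
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "CUDA error %s at %s:%d\n",                    \
                    cudaGetErrorString(err_), __FILE__, __LINE__);         \
            exit(1);                                                       \
        }                                                                  \
    } while (0)

// Usage: CUDA_CHECK(cudaMalloc(&vector, N * sizeof(unsigned)));
//        CUDA_CHECK(cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost));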
74020b258b8cc52547a66f6e929df030f61f088f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include "pointwise_hist2_half_byte_template.cuh" #include <hip/hip_cooperative_groups.h> #include <library/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <library/cuda/wrappers/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfByteImpl( const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex, const float* __restrict__ target, const float* __restrict__ weight, const ui32* __restrict__ indices, const TDataPartition* __restrict__ partition, float* __restrict__ binSums, const int totalFeatureCount) { TPointwisePartOffsetsHelper helper(gridDim.z); helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass); feature += (blockIdx.x / M) * 8; cindex += feature->Offset; fCount = min(fCount - (blockIdx.x / M) * 8, 8); // __shared__ float smem[16 * BlockSize]; using THist = TPointHistHalfByte<BlockSize>; #if __CUDA_ARCH__ > 350 const bool use64BitLoad = IsFullPass; #else const bool use64BitLoad = false; #endif if (use64BitLoad) { #if __CUDA_ARCH__ <= 350 const int OUTER_UNROLL = 2; #else const int OUTER_UNROLL = 1; #endif ComputeHistogram2 < BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem); } else { #if __CUDA_ARCH__ <= 300 const int INNER_UNROLL = 2; const int OUTER_UNROLL = 2; #elif __CUDA_ARCH__ <= 350 const int INNER_UNROLL = 4; const int OUTER_UNROLL = 2; #else const int INNER_UNROLL = 1; const int OUTER_UNROLL = 1; #endif ComputeHistogram < BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > ( indices, partition->Offset, partition->Size, target, weight, cindex, smem); } __syncthreads(); const int fid = (threadIdx.x / 32); const int fold = (threadIdx.x / 2) & 15; const int w = threadIdx.x & 1; if (fid < fCount && fold < feature[fid].Folds) { const float result = smem[fold * 16 + 2 * fid + w]; if (abs(result) > 1e-20) { if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result; } } } } template <int BlockSize, int BlocksPerFeatureCount> inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesHalfByteImpl < BlockSize, true, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesHalfByteImpl < BlockSize, false, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount); } } void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, ui32 size, const TDataPartition* 
partition, ui32 partsCount, ui32 foldCount, bool fullPass, const ui32 histLineSize, float* binSums, TCudaStream stream) { dim3 numBlocks; numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8); const int histCount = fullPass ? partsCount : partsCount / 2; numBlocks.y = static_cast<ui32>(histCount); numBlocks.z = foldCount; const int blockSize = 768; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64); numBlocks.x *= multiplier; if (halfByteFeaturesCount) { #define COMPUTE(k)\ RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\ target,\ weight, indices, partition, binSums, histLineSize,\ fullPass,\ stream, numBlocks); if (multiplier == 1) { COMPUTE(1) } else if (multiplier == 2) { COMPUTE(2) } else if (multiplier == 4) { COMPUTE(4) } else if (multiplier == 8) { COMPUTE(8) } else if (multiplier == 16) { COMPUTE(16) } else if (multiplier == 32) { COMPUTE(32) } else if (multiplier == 64) { COMPUTE(64) } else { exit(1); } #undef COMPUTE } } }
74020b258b8cc52547a66f6e929df030f61f088f.cu
#include "pointwise_hist2.cuh" #include "split_properties_helpers.cuh" #include "compute_point_hist2_loop.cuh" #include "pointwise_hist2_half_byte_template.cuh" #include <cooperative_groups.h> #include <library/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> #include <library/cuda/wrappers/arch.cuh> using namespace cooperative_groups; namespace NKernel { template <int BlockSize, bool IsFullPass, int M> #if __CUDA_ARCH__ >= 520 __launch_bounds__(BlockSize, 2) #else __launch_bounds__(BlockSize, 1) #endif __global__ void ComputeSplitPropertiesHalfByteImpl( const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex, const float* __restrict__ target, const float* __restrict__ weight, const ui32* __restrict__ indices, const TDataPartition* __restrict__ partition, float* __restrict__ binSums, const int totalFeatureCount) { TPointwisePartOffsetsHelper helper(gridDim.z); helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, IsFullPass); feature += (blockIdx.x / M) * 8; cindex += feature->Offset; fCount = min(fCount - (blockIdx.x / M) * 8, 8); // __shared__ float smem[16 * BlockSize]; using THist = TPointHistHalfByte<BlockSize>; #if __CUDA_ARCH__ > 350 const bool use64BitLoad = IsFullPass; #else const bool use64BitLoad = false; #endif if (use64BitLoad) { #if __CUDA_ARCH__ <= 350 const int OUTER_UNROLL = 2; #else const int OUTER_UNROLL = 1; #endif ComputeHistogram2 < BlockSize, OUTER_UNROLL, 1, M, THist > (indices, partition->Offset, partition->Size, target, weight, cindex, smem); } else { #if __CUDA_ARCH__ <= 300 const int INNER_UNROLL = 2; const int OUTER_UNROLL = 2; #elif __CUDA_ARCH__ <= 350 const int INNER_UNROLL = 4; const int OUTER_UNROLL = 2; #else const int INNER_UNROLL = 1; const int OUTER_UNROLL = 1; #endif ComputeHistogram < BlockSize, OUTER_UNROLL, INNER_UNROLL, 1, M, THist > ( indices, partition->Offset, partition->Size, target, weight, cindex, smem); } __syncthreads(); const int fid = (threadIdx.x / 32); const int fold = (threadIdx.x / 2) & 15; const int w = threadIdx.x & 1; if (fid < fCount && fold < feature[fid].Folds) { const float result = smem[fold * 16 + 2 * fid + w]; if (abs(result) > 1e-20) { if (M > 1) { atomicAdd(binSums + (feature[fid].FirstFoldIndex + fold) * 2 + w, result); } else { binSums[(feature[fid].FirstFoldIndex + fold) * 2 + w] = result; } } } } template <int BlockSize, int BlocksPerFeatureCount> inline void RunComputeHist2HalfByteKernel(const TCFeature* nbFeatures, int nbCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, const TDataPartition* partition, float* binSums, const int binFeatureCount, bool fullPass, TCudaStream stream, dim3 numBlocks) { if (fullPass) { ComputeSplitPropertiesHalfByteImpl < BlockSize, true, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount ); } else { ComputeSplitPropertiesHalfByteImpl < BlockSize, false, BlocksPerFeatureCount > << <numBlocks, BlockSize, 0, stream>>>( nbFeatures, nbCount, cindex, target, weight, indices, partition, binSums, binFeatureCount); } } void ComputeHist2HalfByte(const TCFeature* halfByteFeatures, ui32 halfByteFeaturesCount, const ui32* cindex, const float* target, const float* weight, const ui32* indices, ui32 size, const TDataPartition* partition, ui32 partsCount, ui32 foldCount, bool fullPass, const ui32 histLineSize, float* binSums, 
TCudaStream stream) { dim3 numBlocks; numBlocks.x = static_cast<ui32>((halfByteFeaturesCount + 7) / 8); const int histCount = fullPass ? partsCount : partsCount / 2; numBlocks.y = static_cast<ui32>(histCount); numBlocks.z = foldCount; const int blockSize = 768; const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64); numBlocks.x *= multiplier; if (halfByteFeaturesCount) { #define COMPUTE(k)\ RunComputeHist2HalfByteKernel<blockSize, k>(halfByteFeatures, halfByteFeaturesCount, cindex,\ target,\ weight, indices, partition, binSums, histLineSize,\ fullPass,\ stream, numBlocks); if (multiplier == 1) { COMPUTE(1) } else if (multiplier == 2) { COMPUTE(2) } else if (multiplier == 4) { COMPUTE(4) } else if (multiplier == 8) { COMPUTE(8) } else if (multiplier == 16) { COMPUTE(16) } else if (multiplier == 32) { COMPUTE(32) } else if (multiplier == 64) { COMPUTE(64) } else { exit(1); } #undef COMPUTE } } }
04caf0cd19260e082131ee90cb12f55a90908355.hip
// !!! This is a file automatically generated by hipify!!! #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/div_rtn.h> #include <ATen/hip/HIPBlas.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/hip/im2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_slow_conv2d_forward_native.h> #include <ATen/ops/_slow_conv2d_backward_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/sum.h> #endif namespace at { namespace native { namespace { void slow_conv2d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, bool weight_nullable) { TORCH_CHECK(kW > 0 && kH > 0, "kernel size should be greater than zero, but got kH: ", kH, " kW: ", kW); TORCH_CHECK(dW > 0 && dH > 0, "stride should be greater than zero, but got dH: ", dH, " dW: ", dW); TORCH_CHECK(weight_nullable || weight.defined(), "weight tensor is expected to be non-nullable"); TORCH_CHECK(!weight.defined() || ((weight.numel() > 0) && (weight.dim() == 2)), "non-empty 2D weight tensor expected, but got: ", weight.sizes()); TORCH_CHECK(!bias.defined() || (bias.dim() == 1 && bias.sizes()[0] == weight.sizes()[0]), "Expected bias to have shape [", weight.sizes()[0], "] but got ", bias.sizes()); const auto in_sizes = input.sizes(); constexpr int ndim = 4; constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; TORCH_CHECK(in_sizes.size() == ndim, "Expected 4D input tensor, but got ", in_sizes); // Allow for empty batch size but not other dimensions const bool valid_empty = c10::multiply_integers(in_sizes.slice(1)) != 0; TORCH_CHECK(valid_empty, "non-empty input tensor expected but got: ", in_sizes); int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t exactInputHeight = inputHeight + 2 * padH; int64_t exactInputWidth = inputWidth + 2 * padW; TORCH_CHECK(exactInputHeight >= kH && exactInputWidth >= kW, "Calculated padded input size per channel: ", IntArrayRef{exactInputHeight, exactInputWidth}, ". Kernel size: ", IntArrayRef{kH, kW}, ". Kernel size can't be greater than actual input size"); // NOTE: can't use conv_output_size if the weight isn't defined auto outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1; auto outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1; TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1, "Given input size per channel: ", IntArrayRef{inputHeight, inputWidth}, ". Calculated output size per channel: ", IntArrayRef{outputHeight, outputWidth}, ". 
Output size is too small"); if (weight.defined()) { const auto w_sizes = weight.sizes(); int64_t nInputPlane = w_sizes[1]; if (w_sizes.size() == 2) { nInputPlane /= (kH * kW); } TORCH_CHECK(in_sizes[dimf] == nInputPlane, "Expected input dim ", dimf, " to have size ", nInputPlane, " but got ", in_sizes[dimf]); } if (grad_output.defined()) { const auto gO_sizes = grad_output.sizes(); TORCH_CHECK(gO_sizes.size() == ndim, "Expected grad_output to have ", ndim, " dimensions but got shape", gO_sizes); if (weight.defined()) { const auto w_sizes = weight.sizes(); TORCH_CHECK(gO_sizes[dimf] == w_sizes[0], "Expected dim ", dimf, " to have size ", w_sizes[0], " but got ", gO_sizes[dimf]); } else if (bias.defined()) { const auto b_sizes = bias.sizes(); int64_t nOutputPlane = b_sizes.size() == 0 ? 1 : b_sizes[0]; TORCH_CHECK(gO_sizes[dimf] == nOutputPlane, "Expected grad_output dim ", dimf, " to have size ", nOutputPlane, " but got ", gO_sizes[dimf]); } TORCH_CHECK(gO_sizes[dimh] == outputHeight, "Expected grad_output dim ", dimh, " to have size ", outputHeight, " but got ", gO_sizes[dimh]); TORCH_CHECK(gO_sizes[dimw] == outputWidth, "Expected grad_output dim ", dimw, " to have size ", outputWidth, " but got ", gO_sizes[dimw]); } } Tensor new_view_weight_MM2d(const Tensor& weight_) { auto weight = weight_.expect_contiguous(); const auto w_sizes = weight->sizes(); TORCH_CHECK(w_sizes.size() == 4); int64_t s1 = w_sizes[0]; int64_t s2 = c10::multiply_integers(w_sizes.slice(1)); return weight->view({s1, s2}); } void slow_conv2d_forward( const Tensor &input, const Tensor &output, const Tensor &weight_, const Tensor &bias, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { auto weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check( input, {}, weight, bias, kH, kW, dH, dW, padH, padW, /*weight_nullable*/false); constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; auto in_sizes = input.sizes(); int64_t batchSize = in_sizes[0]; int64_t nInputPlane = in_sizes[dimf]; int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t nOutputPlane = weight.sizes()[0]; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; // Resize output resize_output(output, {batchSize, nOutputPlane, outputHeight, outputWidth}); // Create temporary columns auto columns = at::empty({nInputPlane * kW * kH, outputHeight * outputWidth}, input.options()); const bool requires_columns = ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0); if (bias.defined()) { TORCH_CHECK(bias.scalar_type() == input.scalar_type(), "Expected bias to have type ", input.scalar_type(), " but got ", bias.scalar_type()); output.copy_(bias.view({-1, 1, 1})); } else { output.zero_(); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: auto input_n = input.select(0, elt); auto output_n = output.select(0, elt); if (requires_columns) { // Extract columns: at::native::im2col( c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, columns.data_ptr<scalar_t>() ); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nOutputPlane; int64_t n = columns.size(1); int64_t k = 
nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) auto gemm_in_ptr = requires_columns ? columns.data_ptr<scalar_t>() : input_n.data_ptr<scalar_t>(); at::cuda::blas::gemm( 'n', 'n', n, m, k, scalar_t(1), gemm_in_ptr, n, weight.data_ptr<scalar_t>(), k, scalar_t(1), output_n.data_ptr<scalar_t>(), n ); } }); } void slow_conv2d_backward( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight_, const Tensor &grad_columns, int kH, int kW, int dH, int dW, int padH, int padW) { Tensor weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check(input, grad_output, weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/false); // Params auto weight_sizes = weight.sizes(); int nInputPlane = weight_sizes[1]/(kW*kH); int nOutputPlane = weight_sizes[0]; TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; auto output_sizes = grad_output.sizes(); int64_t outputWidth = output_sizes[3]; int64_t outputHeight = output_sizes[2]; // Batch size + input planes int64_t batchSize = input_sizes[0]; // Resize output resize_output(grad_input, input_sizes); TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); // Resize temporary columns resize_output(grad_columns, {nInputPlane*kW*kH, outputHeight*outputWidth}); TORCH_CHECK(grad_columns.is_contiguous(), "grad_columns must be contiguous"); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_backward_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: auto grad_input_n = grad_input.select(0, elt); auto grad_output_n = grad_output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nInputPlane*kW*kH; int64_t n = grad_columns.sizes()[1]; int64_t k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, scalar_t(1), grad_output_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, scalar_t(0), grad_columns.data_ptr<scalar_t>(), n ); // Unpack columns back into input: using acc_t = at::acc_type<scalar_t, true>; at::native::col2im<scalar_t, acc_t>( c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_columns.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, grad_input_n.data_ptr<scalar_t>() ); } }); } void slow_conv2d_grad_weight( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight_, const Tensor &columns, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { TORCH_CHECK(grad_weight_.is_contiguous(), "grad_weight needs to be contiguous"); auto grad_weight = new_view_weight_MM2d(grad_weight_); slow_conv2d_shape_check(input, grad_output, grad_weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/true); // Params TORCH_INTERNAL_ASSERT(input.is_contiguous()); TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t nInputPlane = input_sizes[1]; int64_t nOutputPlane = grad_output.sizes()[1]; int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes int64_t 
batchSize = input_sizes[0]; // Resize temporary columns resize_output(columns, {nInputPlane * kH * kW, outputHeight * outputWidth}); const bool requires_columns = ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_grad_weight_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: auto grad_output_n = grad_output.select(0, elt); // Matrix mulitply per output: auto input_n = input.select(0, elt); if (requires_columns) { // Extract columns: at::native::im2col<scalar_t>( c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_n.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, columns.data_ptr<scalar_t>() ); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nOutputPlane; int64_t n = nInputPlane*kW*kH; int64_t k = columns.sizes()[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) auto gemm_in_ptr = requires_columns ? columns.data_ptr<scalar_t>() : input_n.data_ptr<scalar_t>(); at::cuda::blas::gemm( 't', 'n', n, m, k, scalar_t(1), gemm_in_ptr, k, grad_output_n.data_ptr<scalar_t>(), k, scalar_t(1), grad_weight.data_ptr<scalar_t>(), n ); } }); } } // namespace (anonymous) Tensor& slow_conv2d_forward_out_cuda( const Tensor &self_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_, IntArrayRef stride, IntArrayRef padding, Tensor &output) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); auto self = self_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_.has_value() && bias_->defined()) { return bias_->expect_contiguous(); } return MaybeOwned<Tensor>::owned(c10::in_place); }(); slow_conv2d_forward( *self, output, *weight, *bias, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1] ); return output; } Tensor slow_conv2d_forward_cuda( const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, const c10::optional<Tensor> &bias, IntArrayRef stride, IntArrayRef padding) { auto output = at::empty({0}, self.options()); return slow_conv2d_forward_out_cuda( self, weight, kernel_size, bias, stride, padding, output); } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv2d_backward_out_cuda( const Tensor& grad_output_, const Tensor& self_, const Tensor& weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { auto grad_output = grad_output_.expect_contiguous(); Tensor columns = at::empty({0}, self_.options()); if (grad_input.defined()) { resize_output(grad_input, self_.sizes()); auto weight = weight_.expect_contiguous(); slow_conv2d_backward( self_, *grad_output, grad_input, *weight, columns, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1]); } if (grad_bias.defined()) { at::sum_out(grad_bias, *grad_output, IntArrayRef{0, 2, 3}); } if (grad_weight.defined()) { resize_output(grad_weight, weight_.sizes()); grad_weight.zero_(); auto self = self_.expect_contiguous(); slow_conv2d_grad_weight( *self, *grad_output, grad_weight, columns, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1] ); } return std::tuple<Tensor&, Tensor&, Tensor&>{ grad_input, grad_weight, grad_bias}; } std::tuple<Tensor, 
Tensor, Tensor> slow_conv2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } return native::slow_conv2d_backward_out_cuda( grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
04caf0cd19260e082131ee90cb12f55a90908355.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/div_rtn.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/native/ConvUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/cuda/im2col.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/_slow_conv2d_forward_native.h> #include <ATen/ops/_slow_conv2d_backward_native.h> #include <ATen/ops/empty.h> #include <ATen/ops/sum.h> #endif namespace at { namespace native { namespace { void slow_conv2d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW, bool weight_nullable) { TORCH_CHECK(kW > 0 && kH > 0, "kernel size should be greater than zero, but got kH: ", kH, " kW: ", kW); TORCH_CHECK(dW > 0 && dH > 0, "stride should be greater than zero, but got dH: ", dH, " dW: ", dW); TORCH_CHECK(weight_nullable || weight.defined(), "weight tensor is expected to be non-nullable"); TORCH_CHECK(!weight.defined() || ((weight.numel() > 0) && (weight.dim() == 2)), "non-empty 2D weight tensor expected, but got: ", weight.sizes()); TORCH_CHECK(!bias.defined() || (bias.dim() == 1 && bias.sizes()[0] == weight.sizes()[0]), "Expected bias to have shape [", weight.sizes()[0], "] but got ", bias.sizes()); const auto in_sizes = input.sizes(); constexpr int ndim = 4; constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; TORCH_CHECK(in_sizes.size() == ndim, "Expected 4D input tensor, but got ", in_sizes); // Allow for empty batch size but not other dimensions const bool valid_empty = c10::multiply_integers(in_sizes.slice(1)) != 0; TORCH_CHECK(valid_empty, "non-empty input tensor expected but got: ", in_sizes); int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t exactInputHeight = inputHeight + 2 * padH; int64_t exactInputWidth = inputWidth + 2 * padW; TORCH_CHECK(exactInputHeight >= kH && exactInputWidth >= kW, "Calculated padded input size per channel: ", IntArrayRef{exactInputHeight, exactInputWidth}, ". Kernel size: ", IntArrayRef{kH, kW}, ". Kernel size can't be greater than actual input size"); // NOTE: can't use conv_output_size if the weight isn't defined auto outputHeight = div_rtn<int64_t>(exactInputHeight - kH, dH) + 1; auto outputWidth = div_rtn<int64_t>(exactInputWidth - kW, dW) + 1; TORCH_CHECK(outputWidth >= 1 && outputHeight >= 1, "Given input size per channel: ", IntArrayRef{inputHeight, inputWidth}, ". Calculated output size per channel: ", IntArrayRef{outputHeight, outputWidth}, ". Output size is too small"); if (weight.defined()) { const auto w_sizes = weight.sizes(); int64_t nInputPlane = w_sizes[1]; if (w_sizes.size() == 2) { nInputPlane /= (kH * kW); } TORCH_CHECK(in_sizes[dimf] == nInputPlane, "Expected input dim ", dimf, " to have size ", nInputPlane, " but got ", in_sizes[dimf]); } if (grad_output.defined()) { const auto gO_sizes = grad_output.sizes(); TORCH_CHECK(gO_sizes.size() == ndim, "Expected grad_output to have ", ndim, " dimensions but got shape", gO_sizes); if (weight.defined()) { const auto w_sizes = weight.sizes(); TORCH_CHECK(gO_sizes[dimf] == w_sizes[0], "Expected dim ", dimf, " to have size ", w_sizes[0], " but got ", gO_sizes[dimf]); } else if (bias.defined()) { const auto b_sizes = bias.sizes(); int64_t nOutputPlane = b_sizes.size() == 0 ? 
1 : b_sizes[0]; TORCH_CHECK(gO_sizes[dimf] == nOutputPlane, "Expected grad_output dim ", dimf, " to have size ", nOutputPlane, " but got ", gO_sizes[dimf]); } TORCH_CHECK(gO_sizes[dimh] == outputHeight, "Expected grad_output dim ", dimh, " to have size ", outputHeight, " but got ", gO_sizes[dimh]); TORCH_CHECK(gO_sizes[dimw] == outputWidth, "Expected grad_output dim ", dimw, " to have size ", outputWidth, " but got ", gO_sizes[dimw]); } } Tensor new_view_weight_MM2d(const Tensor& weight_) { auto weight = weight_.expect_contiguous(); const auto w_sizes = weight->sizes(); TORCH_CHECK(w_sizes.size() == 4); int64_t s1 = w_sizes[0]; int64_t s2 = c10::multiply_integers(w_sizes.slice(1)); return weight->view({s1, s2}); } void slow_conv2d_forward( const Tensor &input, const Tensor &output, const Tensor &weight_, const Tensor &bias, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { auto weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check( input, {}, weight, bias, kH, kW, dH, dW, padH, padW, /*weight_nullable*/false); constexpr int dimf = 1; constexpr int dimh = 2; constexpr int dimw = 3; auto in_sizes = input.sizes(); int64_t batchSize = in_sizes[0]; int64_t nInputPlane = in_sizes[dimf]; int64_t inputHeight = in_sizes[dimh]; int64_t inputWidth = in_sizes[dimw]; int64_t nOutputPlane = weight.sizes()[0]; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; // Resize output resize_output(output, {batchSize, nOutputPlane, outputHeight, outputWidth}); // Create temporary columns auto columns = at::empty({nInputPlane * kW * kH, outputHeight * outputWidth}, input.options()); const bool requires_columns = ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0); if (bias.defined()) { TORCH_CHECK(bias.scalar_type() == input.scalar_type(), "Expected bias to have type ", input.scalar_type(), " but got ", bias.scalar_type()); output.copy_(bias.view({-1, 1, 1})); } else { output.zero_(); } AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: auto input_n = input.select(0, elt); auto output_n = output.select(0, elt); if (requires_columns) { // Extract columns: at::native::im2col( c10::cuda::getCurrentCUDAStream(), input_n.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, columns.data_ptr<scalar_t>() ); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nOutputPlane; int64_t n = columns.size(1); int64_t k = nInputPlane*kH*kW; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) auto gemm_in_ptr = requires_columns ? 
columns.data_ptr<scalar_t>() : input_n.data_ptr<scalar_t>(); at::cuda::blas::gemm( 'n', 'n', n, m, k, scalar_t(1), gemm_in_ptr, n, weight.data_ptr<scalar_t>(), k, scalar_t(1), output_n.data_ptr<scalar_t>(), n ); } }); } void slow_conv2d_backward( const Tensor &input, const Tensor &grad_output, const Tensor &grad_input, const Tensor &weight_, const Tensor &grad_columns, int kH, int kW, int dH, int dW, int padH, int padW) { Tensor weight = new_view_weight_MM2d(weight_); slow_conv2d_shape_check(input, grad_output, weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/false); // Params auto weight_sizes = weight.sizes(); int nInputPlane = weight_sizes[1]/(kW*kH); int nOutputPlane = weight_sizes[0]; TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; auto output_sizes = grad_output.sizes(); int64_t outputWidth = output_sizes[3]; int64_t outputHeight = output_sizes[2]; // Batch size + input planes int64_t batchSize = input_sizes[0]; // Resize output resize_output(grad_input, input_sizes); TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous"); // Resize temporary columns resize_output(grad_columns, {nInputPlane*kW*kH, outputHeight*outputWidth}); TORCH_CHECK(grad_columns.is_contiguous(), "grad_columns must be contiguous"); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_backward_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per sample: auto grad_input_n = grad_input.select(0, elt); auto grad_output_n = grad_output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nInputPlane*kW*kH; int64_t n = grad_columns.sizes()[1]; int64_t k = nOutputPlane; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) at::cuda::blas::gemm<scalar_t>( 'n', 't', n, m, k, scalar_t(1), grad_output_n.data_ptr<scalar_t>(), n, weight.data_ptr<scalar_t>(), m, scalar_t(0), grad_columns.data_ptr<scalar_t>(), n ); // Unpack columns back into input: using acc_t = at::acc_type<scalar_t, true>; at::native::col2im<scalar_t, acc_t>( c10::cuda::getCurrentCUDAStream(), grad_columns.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, grad_input_n.data_ptr<scalar_t>() ); } }); } void slow_conv2d_grad_weight( const Tensor &input, const Tensor &grad_output, const Tensor &grad_weight_, const Tensor &columns, int64_t kH, int64_t kW, int64_t dH, int64_t dW, int64_t padH, int64_t padW) { TORCH_CHECK(grad_weight_.is_contiguous(), "grad_weight needs to be contiguous"); auto grad_weight = new_view_weight_MM2d(grad_weight_); slow_conv2d_shape_check(input, grad_output, grad_weight, {}, kH, kW, dH, dW, padH, padW, /*weight_nullable=*/true); // Params TORCH_INTERNAL_ASSERT(input.is_contiguous()); TORCH_INTERNAL_ASSERT(grad_output.is_contiguous()); auto input_sizes = input.sizes(); int64_t nInputPlane = input_sizes[1]; int64_t nOutputPlane = grad_output.sizes()[1]; int64_t inputWidth = input_sizes[3]; int64_t inputHeight = input_sizes[2]; int64_t outputWidth = (inputWidth + 2*padW - kW) / dW + 1; int64_t outputHeight = (inputHeight + 2*padH - kH) / dH + 1; // Batch size + input planes int64_t batchSize = input_sizes[0]; // Resize temporary columns resize_output(columns, {nInputPlane * kH * kW, outputHeight * outputWidth}); const bool requires_columns 
= ( kW != 1 || kH != 1 || dW != 1 || dH != 1 || padH != 0 || padW != 0); AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16, input.scalar_type(), "slow_conv2d_grad_weight_cuda", [&] { // For each elt in batch, do: for (int elt = 0; elt < batchSize; elt ++) { // Matrix mulitply per output: auto grad_output_n = grad_output.select(0, elt); // Matrix mulitply per output: auto input_n = input.select(0, elt); if (requires_columns) { // Extract columns: at::native::im2col<scalar_t>( c10::cuda::getCurrentCUDAStream(), input_n.data_ptr<scalar_t>(), nInputPlane, inputHeight, inputWidth, outputHeight, outputWidth, kH, kW, padH, padW, dH, dW, 1, 1, columns.data_ptr<scalar_t>() ); } // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = nOutputPlane; int64_t n = nInputPlane*kW*kH; int64_t k = columns.sizes()[1]; // Do GEMM (note: this is a bit confusing because gemm assumes column-major matrices) auto gemm_in_ptr = requires_columns ? columns.data_ptr<scalar_t>() : input_n.data_ptr<scalar_t>(); at::cuda::blas::gemm( 't', 'n', n, m, k, scalar_t(1), gemm_in_ptr, k, grad_output_n.data_ptr<scalar_t>(), k, scalar_t(1), grad_weight.data_ptr<scalar_t>(), n ); } }); } } // namespace (anonymous) Tensor& slow_conv2d_forward_out_cuda( const Tensor &self_, const Tensor &weight_, IntArrayRef kernel_size, const c10::optional<Tensor> &bias_, IntArrayRef stride, IntArrayRef padding, Tensor &output) { TORCH_CHECK(kernel_size.size() == 2); TORCH_CHECK(stride.size() == 2); TORCH_CHECK(padding.size() == 2); auto self = self_.expect_contiguous(); auto weight = weight_.expect_contiguous(); auto bias = [&] { if (bias_.has_value() && bias_->defined()) { return bias_->expect_contiguous(); } return MaybeOwned<Tensor>::owned(c10::in_place); }(); slow_conv2d_forward( *self, output, *weight, *bias, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1] ); return output; } Tensor slow_conv2d_forward_cuda( const Tensor &self, const Tensor &weight, IntArrayRef kernel_size, const c10::optional<Tensor> &bias, IntArrayRef stride, IntArrayRef padding) { auto output = at::empty({0}, self.options()); return slow_conv2d_forward_out_cuda( self, weight, kernel_size, bias, stride, padding, output); } std::tuple<Tensor&, Tensor&, Tensor&> slow_conv2d_backward_out_cuda( const Tensor& grad_output_, const Tensor& self_, const Tensor& weight_, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias) { auto grad_output = grad_output_.expect_contiguous(); Tensor columns = at::empty({0}, self_.options()); if (grad_input.defined()) { resize_output(grad_input, self_.sizes()); auto weight = weight_.expect_contiguous(); slow_conv2d_backward( self_, *grad_output, grad_input, *weight, columns, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1]); } if (grad_bias.defined()) { at::sum_out(grad_bias, *grad_output, IntArrayRef{0, 2, 3}); } if (grad_weight.defined()) { resize_output(grad_weight, weight_.sizes()); grad_weight.zero_(); auto self = self_.expect_contiguous(); slow_conv2d_grad_weight( *self, *grad_output, grad_weight, columns, kernel_size[0], kernel_size[1], stride[0], stride[1], padding[0], padding[1] ); } return std::tuple<Tensor&, Tensor&, Tensor&>{ grad_input, grad_weight, grad_bias}; } std::tuple<Tensor, Tensor, Tensor> slow_conv2d_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef 
padding, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } return native::slow_conv2d_backward_out_cuda( grad_output, self, weight, kernel_size, stride, padding, grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
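// Worked example (illustrative numbers only): with inputHeight = 224, padH = 1, kH = 3
// and dH = 2, the forward pass above computes
//     outputHeight = (224 + 2*1 - 3) / 2 + 1 = 223 / 2 + 1 = 111 + 1 = 112,
// which matches the div_rtn-based size check in slow_conv2d_shape_check.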
6691bd342f147a03a99aa25ef6bdd0d89d0d2d0e.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mam.h" double getTime() { const double kMicro = 1.0e-6; struct timeval TV; const int RC = gettimeofday(&TV, NULL); if(RC == -1) { printf("ERROR: Bad call to gettimeofday\n"); return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } int allocate(char *type, void **ptr, size_t size) { if (strcmp(type, "malloc") == 0) { *ptr = malloc(size); if (*ptr == NULL) return 0; } else if (strcmp(type, "hipMalloc") == 0) { int err = hipMalloc(ptr, size); if (err == hipErrorMemoryAllocation) return 0; } else if (strcmp(type, "MAM_CudaMalloc") == 0) { int err = MAM_CudaMalloc(ptr, size); if (err == 0) return 0; } else { return 0; } return 1; } void deallocate (char *type, void *ptr) { if (strcmp(type, "malloc") == 0) { free(ptr); } else if (strcmp(type, "hipMalloc") == 0) { hipFree(ptr); } else if (strcmp(type, "MAM_CudaMalloc") == 0) { MAM_CudaFree(ptr); } } int main(int argc, char *argv[]) { //arguments if (argc != 4) { printf("Invalid number of argument. Exitting...\n"); exit(1); } int size = atoi(argv[1]); int n = atoi(argv[2]); char *type = argv[3]; //wake up the device char *chWake; hipMalloc( (void **) &chWake, 1); hipFree( (void *) chWake); //prepare if (strcmp(type, "MAM_CudaMalloc") == 0) { int mamerr = MAM_Create_auto(); if (mamerr == 0) { printf("unable to create mam\n"); return 0; } } void *chv = malloc(sizeof(char *) * n); char **ch = (char **) chv; int i; double begin, end, timeSpentPerAlloc; //begin experience begin = getTime(); for (i=0; i<n; i++) { int err = allocate(type, (void **) &ch[i], size); if (err == 0) { for (i-=1; i>=0; i--) { deallocate(type, ch[i]); } free(ch); printf("Allocation error. Exitting...\n"); exit(1); } } end = getTime(); timeSpentPerAlloc = (end - begin) / n; printf("%e\n", timeSpentPerAlloc); for (i=0; i<n; i++) { deallocate(type, (void *) ch[i]); } MAM_Destroy(); free(ch); return 0; }
6691bd342f147a03a99aa25ef6bdd0d89d0d2d0e.cu
#include <sys/time.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "mam.h" double getTime() { const double kMicro = 1.0e-6; struct timeval TV; const int RC = gettimeofday(&TV, NULL); if(RC == -1) { printf("ERROR: Bad call to gettimeofday\n"); return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } int allocate(char *type, void **ptr, size_t size) { if (strcmp(type, "malloc") == 0) { *ptr = malloc(size); if (*ptr == NULL) return 0; } else if (strcmp(type, "cudaMalloc") == 0) { int err = cudaMalloc(ptr, size); if (err == cudaErrorMemoryAllocation) return 0; } else if (strcmp(type, "MAM_CudaMalloc") == 0) { int err = MAM_CudaMalloc(ptr, size); if (err == 0) return 0; } else { return 0; } return 1; } void deallocate (char *type, void *ptr) { if (strcmp(type, "malloc") == 0) { free(ptr); } else if (strcmp(type, "cudaMalloc") == 0) { cudaFree(ptr); } else if (strcmp(type, "MAM_CudaMalloc") == 0) { MAM_CudaFree(ptr); } } int main(int argc, char *argv[]) { //arguments if (argc != 4) { printf("Invalid number of argument. Exitting...\n"); exit(1); } int size = atoi(argv[1]); int n = atoi(argv[2]); char *type = argv[3]; //wake up the device char *chWake; cudaMalloc( (void **) &chWake, 1); cudaFree( (void *) chWake); //prepare if (strcmp(type, "MAM_CudaMalloc") == 0) { int mamerr = MAM_Create_auto(); if (mamerr == 0) { printf("unable to create mam\n"); return 0; } } void *chv = malloc(sizeof(char *) * n); char **ch = (char **) chv; int i; double begin, end, timeSpentPerAlloc; //begin experience begin = getTime(); for (i=0; i<n; i++) { int err = allocate(type, (void **) &ch[i], size); if (err == 0) { for (i-=1; i>=0; i--) { deallocate(type, ch[i]); } free(ch); printf("Allocation error. Exitting...\n"); exit(1); } } end = getTime(); timeSpentPerAlloc = (end - begin) / n; printf("%e\n", timeSpentPerAlloc); for (i=0; i<n; i++) { deallocate(type, (void *) ch[i]); } MAM_Destroy(); free(ch); return 0; }
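/* Illustrative sketch (teardown() is a name invented for this example, not part of the
   file above): MAM_Destroy() is invoked unconditionally, including for the "malloc" and
   "cudaMalloc" paths where MAM_Create_auto() was never called. If the MAM API expects
   create/destroy to be paired, a guarded teardown mirrors the setup: */
static void teardown(const char *type) {
    if (strcmp(type, "MAM_CudaMalloc") == 0) {
        MAM_Destroy();   /* only tear down what MAM_Create_auto() set up */
    }
}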
88df9d836a10d9c96ad6efe148c59aea2b26561f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include </home/yzamora/power/nvidia_gpus/cusp/blas.h> #include </home/yzamora/power/nvidia_gpus/cusp/array2d.h> #include </home/yzamora/power/nvidia_gpus/cusp/print.h> #include <iostream> #include <sys/time.h> #include <stdio.h> #include <float.h> #include <fstream> #include <stdlib.h> #include <string.h> #define NTIMES 1000 static double avgtime = 0; __global__ void set_array(float *a, float value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } //__global__ void blas_ex(float scalar, int len) void blas_ex(float scalar, int len) { //avgtime = mysecond(); // initialize x vector /*cusp::array1d<float, cusp::host_memory> x(4); x[0] = 1; x[1] = 2; x[2] = 5000; x[3] = 100000; // initialize y vector cusp::array1d<float, cusp::host_memory> y(4); y[0] = 1; y[1] = 2; y[2] = 5000; y[3] = 100000;*/ cusp::array1d<float,cusp::host_memory>x(len, scalar); cusp::array1d<float,cusp::host_memory>y(len, scalar); cusp::array1d<float,cusp::host_memory>z(len); cusp::blas::xmy(x,y,z); /* int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { // compute y = alpha * x + y cusp::blas::axpy(x,y,scalar); // print y //cusp::print(y); // allocate output vector //cusp::array1d<float, cusp::host_memory> z(4); //Element wise multiplication cusp::blas::xmy(x,y,z); // compute z = x .* y (element-wise multiplication) //cusp::blas::xmy(x,y,z); print z cusp::print(z); // compute the l_2 norm of z in 2 different ways std::cout << "|z| = " << cusp::blas::nrm2(z) << std::endl; std::cout << "sqrt(z'z) = " << sqrt(cusp::blas::dotc(z,z)) << std::endl; // compute the l_1 norm of z (manhattan distance) std::cout << "|z|_1 = " << cusp::blas::nrm1(z) << std::endl; // compute the largest component of a vector in absolute value std::cout << "max(|z_i|) = " << cusp::blas::nrmmax(z) << std::endl; //avgtime = mysecond() - avgtime; //std::cout << "Time to run: " << avgtime << "s" << std::endl; }*/ } int main(void) { float *d_a, *d_b, *d_c; int k; double times[1][NTIMES]; int N = 100; int bsize = 128; float scalar; /* Allocate memory on device */ hipMalloc((void**)&d_a, sizeof(float)*N); hipMalloc((void**)&d_b, sizeof(float)*N); hipMalloc((void**)&d_c, sizeof(float)*N); /* Compute execution configuration */ dim3 dimBlock(bsize); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a, 2.f, N); hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_b, .5f, N); hipLaunchKernelGGL(( set_array), dim3(dimGrid),dim3(dimBlock), 0, 0, d_c, .5f, N); scalar = 3.0f; /* ---- Running NTIMES for average time ----*/ for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); blas_ex(scalar, N); hipDeviceSynchronize(); times[0][k] = mysecond() - times[0][k]; } /* --- Summary ---*/ for (k=1; k<NTIMES; k++) { avgtime = avgtime + times[0][k]; } avgtime = avgtime/(double(NTIMES-1)); printf("Average time %11.8f \n", avgtime); }
88df9d836a10d9c96ad6efe148c59aea2b26561f.cu
#include </home/yzamora/power/nvidia_gpus/cusp/blas.h> #include </home/yzamora/power/nvidia_gpus/cusp/array2d.h> #include </home/yzamora/power/nvidia_gpus/cusp/print.h> #include <iostream> #include <sys/time.h> #include <stdio.h> #include <float.h> #include <fstream> #include <stdlib.h> #include <string.h> #define NTIMES 1000 static double avgtime = 0; __global__ void set_array(float *a, float value, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) a[idx] = value; } double mysecond() { struct timeval tp; struct timezone tzp; int i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } //__global__ void blas_ex(float scalar, int len) void blas_ex(float scalar, int len) { //avgtime = mysecond(); // initialize x vector /*cusp::array1d<float, cusp::host_memory> x(4); x[0] = 1; x[1] = 2; x[2] = 5000; x[3] = 100000; // initialize y vector cusp::array1d<float, cusp::host_memory> y(4); y[0] = 1; y[1] = 2; y[2] = 5000; y[3] = 100000;*/ cusp::array1d<float,cusp::host_memory>x(len, scalar); cusp::array1d<float,cusp::host_memory>y(len, scalar); cusp::array1d<float,cusp::host_memory>z(len); cusp::blas::xmy(x,y,z); /* int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { // compute y = alpha * x + y cusp::blas::axpy(x,y,scalar); // print y //cusp::print(y); // allocate output vector //cusp::array1d<float, cusp::host_memory> z(4); //Element wise multiplication cusp::blas::xmy(x,y,z); // compute z = x .* y (element-wise multiplication) //cusp::blas::xmy(x,y,z); print z cusp::print(z); // compute the l_2 norm of z in 2 different ways std::cout << "|z| = " << cusp::blas::nrm2(z) << std::endl; std::cout << "sqrt(z'z) = " << sqrt(cusp::blas::dotc(z,z)) << std::endl; // compute the l_1 norm of z (manhattan distance) std::cout << "|z|_1 = " << cusp::blas::nrm1(z) << std::endl; // compute the largest component of a vector in absolute value std::cout << "max(|z_i|) = " << cusp::blas::nrmmax(z) << std::endl; //avgtime = mysecond() - avgtime; //std::cout << "Time to run: " << avgtime << "s" << std::endl; }*/ } int main(void) { float *d_a, *d_b, *d_c; int k; double times[1][NTIMES]; int N = 100; int bsize = 128; float scalar; /* Allocate memory on device */ cudaMalloc((void**)&d_a, sizeof(float)*N); cudaMalloc((void**)&d_b, sizeof(float)*N); cudaMalloc((void**)&d_c, sizeof(float)*N); /* Compute execution configuration */ dim3 dimBlock(bsize); dim3 dimGrid(N/dimBlock.x ); if( N % dimBlock.x != 0 ) dimGrid.x+=1; printf(" using %d threads per block, %d blocks\n",dimBlock.x,dimGrid.x); /* Initialize memory on the device */ set_array<<<dimGrid,dimBlock>>>(d_a, 2.f, N); set_array<<<dimGrid,dimBlock>>>(d_b, .5f, N); set_array<<<dimGrid,dimBlock>>>(d_c, .5f, N); scalar = 3.0f; /* ---- Running NTIMES for average time ----*/ for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); blas_ex(scalar, N); cudaThreadSynchronize(); times[0][k] = mysecond() - times[0][k]; } /* --- Summary ---*/ for (k=1; k<NTIMES; k++) { avgtime = avgtime + times[0][k]; } avgtime = avgtime/(double(NTIMES-1)); printf("Average time %11.8f \n", avgtime); }
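// Illustrative sketch (blas_ex_device is a name invented for this example, not part of
// the benchmark above): blas_ex builds cusp::host_memory arrays, so the timed loop
// measures a host-side element-wise multiply plus a device synchronize. Moving the
// arrays to cusp::device_memory makes cusp::blas::xmy execute on the GPU instead:
void blas_ex_device(float scalar, int len)
{
    cusp::array1d<float, cusp::device_memory> x(len, scalar);
    cusp::array1d<float, cusp::device_memory> y(len, scalar);
    cusp::array1d<float, cusp::device_memory> z(len);
    cusp::blas::xmy(x, y, z);   // z = x .* y, element-wise, on the device
}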
1e00b4c45ff3dbe116ea33716ef5a1c0e2e87938.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/shard_index_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void ShardIndexInner(const T* in_data, T* out_data, const int64_t numel, const int index_num, const int nshards, const int shard_id, const int ignore_value) { int shard_size = (index_num + nshards - 1) / nshards; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel) { assert(in_data[idx] >= 0 && in_data[idx] < index_num); if (in_data[idx] / shard_size == shard_id) { out_data[idx] = in_data[idx] % shard_size; } else { out_data[idx] = ignore_value; } } } template <typename T, typename Context> void ShardIndexKernel(const Context& dev_ctx, const DenseTensor& in, int index_num, int nshards, int shard_id, int ignore_value, DenseTensor* out) { PADDLE_ENFORCE_GT( index_num, 0, phi::errors::InvalidArgument( "The value 'index_num' for Op(shard_index) must be greater than 0, " "but the value given is %d.", index_num)); PADDLE_ENFORCE_GT(nshards, 0, phi::errors::InvalidArgument( "The value 'nshard' for Op(shard_index) must be " "greater than 0, but the value given is %d.", nshards)); PADDLE_ENFORCE_GE( shard_id, 0, phi::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be greater or " "equal to 0, but the value given is %d.", shard_id)); PADDLE_ENFORCE_LT( shard_id, nshards, phi::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be less than " "nshards (%d), but the value given is %d.", nshards, shard_id)); out->Resize(in.dims()); out->set_lod(in.lod()); auto* in_data = in.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); int64_t numel = in.numel(); auto stream = dev_ctx.stream(); hipLaunchKernelGGL(( ShardIndexInner< T>), dim3((numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS), dim3(PADDLE_CUDA_NUM_THREADS), 0, stream, in_data, out_data, numel, index_num, nshards, shard_id, ignore_value); } } // namespace phi PD_REGISTER_KERNEL( shard_index, GPU, ALL_LAYOUT, phi::ShardIndexKernel, int, int64_t) {}
1e00b4c45ff3dbe116ea33716ef5a1c0e2e87938.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/shard_index_kernel.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { using paddle::platform::PADDLE_CUDA_NUM_THREADS; template <typename T> __global__ void ShardIndexInner(const T* in_data, T* out_data, const int64_t numel, const int index_num, const int nshards, const int shard_id, const int ignore_value) { int shard_size = (index_num + nshards - 1) / nshards; int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numel) { assert(in_data[idx] >= 0 && in_data[idx] < index_num); if (in_data[idx] / shard_size == shard_id) { out_data[idx] = in_data[idx] % shard_size; } else { out_data[idx] = ignore_value; } } } template <typename T, typename Context> void ShardIndexKernel(const Context& dev_ctx, const DenseTensor& in, int index_num, int nshards, int shard_id, int ignore_value, DenseTensor* out) { PADDLE_ENFORCE_GT( index_num, 0, phi::errors::InvalidArgument( "The value 'index_num' for Op(shard_index) must be greater than 0, " "but the value given is %d.", index_num)); PADDLE_ENFORCE_GT(nshards, 0, phi::errors::InvalidArgument( "The value 'nshard' for Op(shard_index) must be " "greater than 0, but the value given is %d.", nshards)); PADDLE_ENFORCE_GE( shard_id, 0, phi::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be greater or " "equal to 0, but the value given is %d.", shard_id)); PADDLE_ENFORCE_LT( shard_id, nshards, phi::errors::InvalidArgument( "The value 'shard_id' for Op(shard_index) must be less than " "nshards (%d), but the value given is %d.", nshards, shard_id)); out->Resize(in.dims()); out->set_lod(in.lod()); auto* in_data = in.data<T>(); auto* out_data = dev_ctx.template Alloc<T>(out); int64_t numel = in.numel(); auto stream = dev_ctx.stream(); ShardIndexInner< T><<<(numel + PADDLE_CUDA_NUM_THREADS - 1) / PADDLE_CUDA_NUM_THREADS, PADDLE_CUDA_NUM_THREADS, 0, stream>>>( in_data, out_data, numel, index_num, nshards, shard_id, ignore_value); } } // namespace phi PD_REGISTER_KERNEL( shard_index, GPU, ALL_LAYOUT, phi::ShardIndexKernel, int, int64_t) {}
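The mapping implemented by ShardIndexInner is compact enough to state on its own: with shard_size equal to the ceiling of index_num / nshards, an index owned by this shard is remapped to its local offset and every other index becomes ignore_value. A standalone sketch of the same rule (names are illustrative, not part of the Paddle kernel):

// Reference version of the shard-index rule used by ShardIndexInner above.
__host__ __device__ inline long long shard_index(long long in,
                                                 int index_num,
                                                 int nshards,
                                                 int shard_id,
                                                 long long ignore_value)
{
    const int shard_size = (index_num + nshards - 1) / nshards;  // ceil division
    if (in / shard_size == shard_id) {
        return in % shard_size;   // local offset inside this shard
    }
    return ignore_value;          // index owned by another shard
}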
fb5cb541f7c59fe0b7b9e99a7a8c34f406c917b2.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hiprand/hiprand_kernel.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_mtgp32_host.h> #include <hip/hip_runtime.h> #include <hip/hip_fp16.h> #include <hip/hip_runtime.h> #include <vector> #include <math.h> #include <rocrand/rocrand_mtgp32_11213.h> #include "philox_random.h" #include "philox_pytorch.h" #include <stdio.h> #include <stdlib.h> #include <time.h> using namespace std; static uint64_t offset=0; // float holdy1=pow(2.0,-10.0); // float holdy2=pow(2.0,-24.0); __device__ const float twoten=0.0009765625; __device__ const float twominustwentyfour=0.000000059604644775390625; __device__ const float twoseven=0.0078125; __device__ const float two133=0.0000000000000000000000000000000000000000918354961579912115600575419704879435795832466228193376178712270530013483949005603790283203125; template<typename T> __device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); } template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; } //this one is for fp16 // __device__ __forceinline__ float get_delta_fp16(float x){ // int e_actual; // frexpf(x, &e_actual); // e_actual-=1; // // int e_actual=e_stored-127; // if(e_actual>=-14){ // return twoten*pow(2,e_actual); // } // else{ // return twominustwentyfour; // } // } __device__ __forceinline__ float get_delta_fp16(float x){ int e_actual; frexpf(x, &e_actual); e_actual-=1; // int e_actual=e_stored-127; if(e_actual>=-126){ return twoseven*pow(2,e_actual); } else{ return two133; } } //for fp16 // template <typename scalar_t> // __device__ __forceinline__ scalar_t natalia_magic(float x,hiprandStatePhilox4_32_10_t state){ // if(x==0.0){ // return scalar_t(0.0); // } // float delta=get_delta_fp16(x); // float randy=hiprand_uniform(&state); // float val; // if(x<0.0){ // val=x-randy*delta; // } // else{ // val=x+randy*delta; // } // // To guarantee representability, route through a guaranteed FP16 cast. // return maybe_upcast<scalar_t>(__float2half_rz(val)); // } //bfloat16 template <typename scalar_t> __device__ __forceinline__ scalar_t natalia_magic(float x,hiprandStatePhilox4_32_10_t state){ if(x==0.0){ return scalar_t(0.0); } float delta=get_delta_fp16(x); float randy=hiprand_uniform(&state); float val; if(x<0.0){ val=x-randy*delta; } else{ val=x+randy*delta; } // To guarantee representability, route through a guaranteed FP16 cast. 
return scalar_t(float(uint32_t((val)) & 0xFFFF0000)); } template <typename scalar_t> __global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){ int threadnum=blockDim.x*blockIdx.x+threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init(seed,threadnum,offset,&state); for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){ float mtx_holder=static_cast<float>(mtx[i]); new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state); } } torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){ torch::IntArrayRef sizes=mtx.sizes(); int dims=sizes.size(); size_t n = 1; for(int county=0;county<dims;county++){ n=n*sizes[county]; } uint64_t seed= 12345ul; const int threads = 256.0; // printf("%d \n \n \n \n ",offset); float sm_max=72.0; float numthreads_per_sm=1024.0; const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1); AT_DISPATCH_FLOATING_TYPES(half_mtx.scalar_type(),"stochastic_tensor_round",([&] {hipLaunchKernelGGL((stochround<scalar_t>), dim3(blocks), dim3(threads), 0, 0, mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);})); offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads); // printf("%d \n \n \n \n ",offset); return half_mtx; }
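For reference, the hipified dispatch above follows the usual translation of a triple-chevron launch inside an AT_DISPATCH lambda: the lambda braces stay where they were in the CUDA original and only the launch statement itself is rewritten as hipLaunchKernelGGL(kernel, grid, block, shared-memory bytes, stream, args...). A toy sketch of that mapping, with illustrative names only:

#include <hip/hip_runtime.h>

__global__ void scaleKernel(float *a, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= s;
}

void launch_scale(float *d_a, float s, int n, hipStream_t stream)
{
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA form:  scaleKernel<<<grid, block, 0, stream>>>(d_a, s, n);
    // HIP form: kernel, grid, block, dynamic shared memory bytes, stream, args...
    hipLaunchKernelGGL(scaleKernel, grid, block, 0, stream, d_a, s, n);
}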
fb5cb541f7c59fe0b7b9e99a7a8c34f406c917b2.cu
#include <torch/extension.h> #include <curand_kernel.h> #include <curand.h> #include <curand_mtgp32_host.h> #include <cuda.h> #include <cuda_fp16.h> #include <cuda_runtime.h> #include <vector> #include <math.h> #include <curand_mtgp32dc_p_11213.h> #include "philox_random.h" #include "philox_pytorch.h" #include <stdio.h> #include <stdlib.h> #include <time.h> using namespace std; static uint64_t offset=0; // float holdy1=pow(2.0,-10.0); // float holdy2=pow(2.0,-24.0); __device__ const float twoten=0.0009765625; __device__ const float twominustwentyfour=0.000000059604644775390625; __device__ const float twoseven=0.0078125; __device__ const float two133=0.0000000000000000000000000000000000000000918354961579912115600575419704879435795832466228193376178712270530013483949005603790283203125; template<typename T> __device__ __forceinline__ T maybe_upcast(__half x){ return T(__half2float(x)); } template<> __device__ __forceinline__ __half maybe_upcast<__half>(__half x){ return x; } //this one is for fp16 // __device__ __forceinline__ float get_delta_fp16(float x){ // int e_actual; // frexpf(x, &e_actual); // e_actual-=1; // // int e_actual=e_stored-127; // if(e_actual>=-14){ // return twoten*pow(2,e_actual); // } // else{ // return twominustwentyfour; // } // } __device__ __forceinline__ float get_delta_fp16(float x){ int e_actual; frexpf(x, &e_actual); e_actual-=1; // int e_actual=e_stored-127; if(e_actual>=-126){ return twoseven*pow(2,e_actual); } else{ return two133; } } //for fp16 // template <typename scalar_t> // __device__ __forceinline__ scalar_t natalia_magic(float x,curandStatePhilox4_32_10_t state){ // if(x==0.0){ // return scalar_t(0.0); // } // float delta=get_delta_fp16(x); // float randy=curand_uniform(&state); // float val; // if(x<0.0){ // val=x-randy*delta; // } // else{ // val=x+randy*delta; // } // // To guarantee representability, route through a guaranteed FP16 cast. // return maybe_upcast<scalar_t>(__float2half_rz(val)); // } //bfloat16 template <typename scalar_t> __device__ __forceinline__ scalar_t natalia_magic(float x,curandStatePhilox4_32_10_t state){ if(x==0.0){ return scalar_t(0.0); } float delta=get_delta_fp16(x); float randy=curand_uniform(&state); float val; if(x<0.0){ val=x-randy*delta; } else{ val=x+randy*delta; } // To guarantee representability, route through a guaranteed FP16 cast. return scalar_t(float(uint32_t((val)) & 0xFFFF0000)); } template <typename scalar_t> __global__ void stochround(float* mtx,scalar_t* new_mtx, int n, uint64_t seed, uint64_t offset){ int threadnum=blockDim.x*blockIdx.x+threadIdx.x; curandStatePhilox4_32_10_t state; curand_init(seed,threadnum,offset,&state); for(int i = threadnum; i <n ; i +=blockDim.x*gridDim.x ){ float mtx_holder=static_cast<float>(mtx[i]); new_mtx[i]=natalia_magic<scalar_t>(mtx_holder,state); } } torch::Tensor stochroundfortensor(torch::Tensor mtx,torch::Tensor half_mtx){ torch::IntArrayRef sizes=mtx.sizes(); int dims=sizes.size(); size_t n = 1; for(int county=0;county<dims;county++){ n=n*sizes[county]; } uint64_t seed= 12345ul; const int threads = 256.0; // printf("%d \n \n \n \n ",offset); float sm_max=72.0; float numthreads_per_sm=1024.0; const dim3 blocks(ceil(sm_max*numthreads_per_sm/threads),1,1); AT_DISPATCH_FLOATING_TYPES(half_mtx.scalar_type(),"stochastic_tensor_round",([&] {stochround<scalar_t><<<blocks, threads>>>(mtx.data<float>(),half_mtx.data<scalar_t>(),n,seed,offset);})); offset = offset + (n + blocks.x*threads - 1)/(blocks.x*threads); // printf("%d \n \n \n \n ",offset); return half_mtx; }
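Both the HIP and CUDA versions of the bfloat16 branch build the rounded value as scalar_t(float(uint32_t(val) & 0xFFFF0000)), where uint32_t(val) converts the numeric value of val rather than reinterpreting its bits. If the intent was to truncate an IEEE-754 float to a bfloat16-style representation, a bit-level sketch would look like the following; this is an assumption about intent, not a statement of what the author meant:

// Truncate a float toward a bfloat16-like value by zeroing the low 16
// mantissa bits of its IEEE-754 encoding (round-toward-zero behaviour).
__device__ __forceinline__ float truncate_to_bf16_bits(float val)
{
    unsigned int bits = __float_as_uint(val);   // reinterpret the bits, not convert the value
    bits &= 0xFFFF0000u;                        // keep sign, exponent, top mantissa bits
    return __uint_as_float(bits);
}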
837697d35da1780554cee0da839cc29b8029c149.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> <<<<<<< HEAD #include "caffe/layers/bnll_layer.hpp" ======= #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" >>>>>>> caffe-yolo/master namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( BNLLBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); } // namespace caffe
837697d35da1780554cee0da839cc29b8029c149.cu
#include <algorithm> #include <vector> <<<<<<< HEAD #include "caffe/layers/bnll_layer.hpp" ======= #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" >>>>>>> caffe-yolo/master namespace caffe { const float kBNLL_THRESHOLD = 50.; template <typename Dtype> __global__ void BNLLForward(const int n, const Dtype* in, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] + log(1. + exp(-in[index])) : log(1. + exp(in[index])); } } template <typename Dtype> void BNLLLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data); CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void BNLLBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { Dtype expval = exp(min(in_data[index], Dtype(kBNLL_THRESHOLD))); out_diff[index] = in_diff[index] * expval / (expval + 1.); } } template <typename Dtype> void BNLLLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) BNLLBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff); CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(BNLLLayer); } // namespace caffe
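BNLLForward evaluates softplus(x) = log(1 + exp(x)) with a branch on the sign of x so that exp() is only ever applied to a non-positive argument. The same guard can be written branch-free with log1p; a small sketch of that equivalent form (illustrative only, not the Caffe code):

// softplus(x) = log(1 + exp(x)) = max(x, 0) + log1p(exp(-|x|))
// The identity keeps exp() applied to a non-positive argument, avoiding overflow.
__host__ __device__ inline float softplus_stable(float x)
{
    return fmaxf(x, 0.0f) + log1pf(expf(-fabsf(x)));
}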
7bd06cf583cab00579a916ea00e887bb4b1dbedc.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // System includes. #include <stdio.h> #include <iostream> // STL. #include <vector> // CUDA runtime. #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA. #include <helper_functions.h> #include <helper_cuda.h> // Device library includes. #include "simpleDeviceLibrary.cuh" using std::cout; using std::endl; using std::vector; #define EPS 1e-5 typedef unsigned int uint; typedef float(*deviceFunc)(float); const char *sampleName = "simpleSeparateCompilation"; //////////////////////////////////////////////////////////////////////////////// // Auto-Verification Code bool testResult = true; //////////////////////////////////////////////////////////////////////////////// // Static device pointers to __device__ functions. __device__ deviceFunc dMultiplyByTwoPtr = multiplyByTwo; __device__ deviceFunc dDivideByTwoPtr = divideByTwo; //////////////////////////////////////////////////////////////////////////////// // Kernels //////////////////////////////////////////////////////////////////////////////// //! Transforms vector. //! Applies the __device__ function "f" to each element of the vector "v". //////////////////////////////////////////////////////////////////////////////// __global__ void transformVector(float *v, deviceFunc f, uint size) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { v[tid] = (*f)(v[tid]); } } //////////////////////////////////////////////////////////////////////////////// // Declaration, forward void runTest(int argc, const char **argv); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { cout << sampleName << " starting..." << endl; runTest(argc, (const char **)argv); cout << sampleName << " completed, returned " << (testResult ? "OK" : "ERROR") << endl; exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); } void runTest(int argc, const char **argv) { try { int devID; //hipError_t error; hipDeviceProp_t deviceProp; // This will pick the best possible CUDA capable device. devID = findCudaDevice(argc, (const char **) argv); checkCudaErrors(hipGetDeviceProperties(&deviceProp, devID)); if (deviceProp.major < 2) { cout << sampleName << " requires a GPU with compute capability " << "2.0 or later, exiting..." << endl; // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_SUCCESS); } // Create host vector. const uint kVectorSize = 1000; vector<float> hVector(kVectorSize); for (uint i = 0; i < kVectorSize; ++i) { hVector[i] = rand() / static_cast<float>(RAND_MAX); } // Create and populate device vector. 
float *dVector; checkCudaErrors(hipMalloc(&dVector, kVectorSize * sizeof(float))); checkCudaErrors(hipMemcpy(dVector, &hVector[0], kVectorSize * sizeof(float), hipMemcpyHostToDevice)); // Kernel configuration, where a one-dimensional // grid and one-dimensional blocks are configured. const int nThreads = 1024; const int nBlocks = 1; dim3 dimGrid(nBlocks); dim3 dimBlock(nThreads); // Test library functions. deviceFunc hFunctionPtr; hipMemcpyFromSymbol(&hFunctionPtr, dMultiplyByTwoPtr, sizeof(deviceFunc)); hipLaunchKernelGGL(( transformVector), dim3(dimGrid), dim3(dimBlock), 0, 0, dVector, hFunctionPtr, kVectorSize); checkCudaErrors(hipGetLastError()); hipMemcpyFromSymbol(&hFunctionPtr, dDivideByTwoPtr, sizeof(deviceFunc)); hipLaunchKernelGGL(( transformVector), dim3(dimGrid), dim3(dimBlock), 0, 0, dVector, hFunctionPtr, kVectorSize); checkCudaErrors(hipGetLastError()); // Download results. vector<float> hResultVector(kVectorSize); checkCudaErrors(hipMemcpy(&hResultVector[0], dVector, kVectorSize *sizeof(float), hipMemcpyDeviceToHost)); // Check results. for (int i = 0; i < kVectorSize; ++i) { if (fabs(hVector[i] - hResultVector[i]) > EPS) { cout << "Computations were incorrect..." << endl; testResult = false; break; } } // Free resources. if (dVector) checkCudaErrors(hipFree(dVector)); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits checkCudaErrors(hipDeviceReset()); } catch (...) { cout << "Error occured, exiting..." << endl; // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); exit(EXIT_FAILURE); } }
7bd06cf583cab00579a916ea00e887bb4b1dbedc.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // System includes. #include <stdio.h> #include <iostream> // STL. #include <vector> // CUDA runtime. #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA. #include <helper_functions.h> #include <helper_cuda.h> // Device library includes. #include "simpleDeviceLibrary.cuh" using std::cout; using std::endl; using std::vector; #define EPS 1e-5 typedef unsigned int uint; typedef float(*deviceFunc)(float); const char *sampleName = "simpleSeparateCompilation"; //////////////////////////////////////////////////////////////////////////////// // Auto-Verification Code bool testResult = true; //////////////////////////////////////////////////////////////////////////////// // Static device pointers to __device__ functions. __device__ deviceFunc dMultiplyByTwoPtr = multiplyByTwo; __device__ deviceFunc dDivideByTwoPtr = divideByTwo; //////////////////////////////////////////////////////////////////////////////// // Kernels //////////////////////////////////////////////////////////////////////////////// //! Transforms vector. //! Applies the __device__ function "f" to each element of the vector "v". //////////////////////////////////////////////////////////////////////////////// __global__ void transformVector(float *v, deviceFunc f, uint size) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < size) { v[tid] = (*f)(v[tid]); } } //////////////////////////////////////////////////////////////////////////////// // Declaration, forward void runTest(int argc, const char **argv); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { cout << sampleName << " starting..." << endl; runTest(argc, (const char **)argv); cout << sampleName << " completed, returned " << (testResult ? "OK" : "ERROR") << endl; exit(testResult ? EXIT_SUCCESS : EXIT_FAILURE); } void runTest(int argc, const char **argv) { try { int devID; //cudaError_t error; cudaDeviceProp deviceProp; // This will pick the best possible CUDA capable device. devID = findCudaDevice(argc, (const char **) argv); checkCudaErrors(cudaGetDeviceProperties(&deviceProp, devID)); if (deviceProp.major < 2) { cout << sampleName << " requires a GPU with compute capability " << "2.0 or later, exiting..." << endl; // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_SUCCESS); } // Create host vector. const uint kVectorSize = 1000; vector<float> hVector(kVectorSize); for (uint i = 0; i < kVectorSize; ++i) { hVector[i] = rand() / static_cast<float>(RAND_MAX); } // Create and populate device vector. 
float *dVector; checkCudaErrors(cudaMalloc(&dVector, kVectorSize * sizeof(float))); checkCudaErrors(cudaMemcpy(dVector, &hVector[0], kVectorSize * sizeof(float), cudaMemcpyHostToDevice)); // Kernel configuration, where a one-dimensional // grid and one-dimensional blocks are configured. const int nThreads = 1024; const int nBlocks = 1; dim3 dimGrid(nBlocks); dim3 dimBlock(nThreads); // Test library functions. deviceFunc hFunctionPtr; cudaMemcpyFromSymbol(&hFunctionPtr, dMultiplyByTwoPtr, sizeof(deviceFunc)); transformVector<<<dimGrid, dimBlock>>> (dVector, hFunctionPtr, kVectorSize); checkCudaErrors(cudaGetLastError()); cudaMemcpyFromSymbol(&hFunctionPtr, dDivideByTwoPtr, sizeof(deviceFunc)); transformVector<<<dimGrid, dimBlock>>> (dVector, hFunctionPtr, kVectorSize); checkCudaErrors(cudaGetLastError()); // Download results. vector<float> hResultVector(kVectorSize); checkCudaErrors(cudaMemcpy(&hResultVector[0], dVector, kVectorSize *sizeof(float), cudaMemcpyDeviceToHost)); // Check results. for (int i = 0; i < kVectorSize; ++i) { if (fabs(hVector[i] - hResultVector[i]) > EPS) { cout << "Computations were incorrect..." << endl; testResult = false; break; } } // Free resources. if (dVector) checkCudaErrors(cudaFree(dVector)); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits checkCudaErrors(cudaDeviceReset()); } catch (...) { cout << "Error occured, exiting..." << endl; // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); exit(EXIT_FAILURE); } }
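The sample above reads __device__ function pointers back to the host with cudaMemcpyFromSymbol before passing them to the kernel; the hipified file calls hipMemcpyFromSymbol with the bare symbol name, and depending on the HIP version the symbol may need to be wrapped in HIP_SYMBOL(). A condensed sketch of the pattern on the CUDA side, with illustrative names (like the sample, it assumes relocatable device code is enabled):

typedef float (*deviceFunc)(float);

__device__ float triple(float x) { return 3.0f * x; }

// Static device-side pointer that the host can read back by symbol.
__device__ deviceFunc dTriplePtr = triple;

__global__ void apply(float *v, deviceFunc f, unsigned int n)
{
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) v[tid] = (*f)(v[tid]);
}

void launch_apply(float *dVec, unsigned int n)
{
    deviceFunc hPtr;
    // Copy the device-side pointer value to the host; with HIP this becomes
    // hipMemcpyFromSymbol(&hPtr, HIP_SYMBOL(dTriplePtr), sizeof(deviceFunc)).
    cudaMemcpyFromSymbol(&hPtr, dTriplePtr, sizeof(deviceFunc));
    apply<<<(n + 255) / 256, 256>>>(dVec, hPtr, n);
}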
6b6c00f6ac1e17c18d38954dd0c35ce86c296931.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "FullyConnectedUpdateMemoryKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *avgWeightGradPtr = NULL; hipMalloc(&avgWeightGradPtr, XSIZE*YSIZE); float *avgBiasGradPtr = NULL; hipMalloc(&avgBiasGradPtr, XSIZE*YSIZE); float *avgWeightGradVarPtr = NULL; hipMalloc(&avgWeightGradVarPtr, XSIZE*YSIZE); float *avgBiasGradVarPtr = NULL; hipMalloc(&avgBiasGradVarPtr, XSIZE*YSIZE); float *weightMemorySizePtr = NULL; hipMalloc(&weightMemorySizePtr, XSIZE*YSIZE); float *biasMemorySizePtr = NULL; hipMalloc(&biasMemorySizePtr, XSIZE*YSIZE); float *dropoutMaskPtr = NULL; hipMalloc(&dropoutMaskPtr, XSIZE*YSIZE); int prevLayerSize = XSIZE*YSIZE; int thisLayerSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( FullyConnectedUpdateMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( FullyConnectedUpdateMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( FullyConnectedUpdateMemoryKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6b6c00f6ac1e17c18d38954dd0c35ce86c296931.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "FullyConnectedUpdateMemoryKernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *avgWeightGradPtr = NULL; cudaMalloc(&avgWeightGradPtr, XSIZE*YSIZE); float *avgBiasGradPtr = NULL; cudaMalloc(&avgBiasGradPtr, XSIZE*YSIZE); float *avgWeightGradVarPtr = NULL; cudaMalloc(&avgWeightGradVarPtr, XSIZE*YSIZE); float *avgBiasGradVarPtr = NULL; cudaMalloc(&avgBiasGradVarPtr, XSIZE*YSIZE); float *weightMemorySizePtr = NULL; cudaMalloc(&weightMemorySizePtr, XSIZE*YSIZE); float *biasMemorySizePtr = NULL; cudaMalloc(&biasMemorySizePtr, XSIZE*YSIZE); float *dropoutMaskPtr = NULL; cudaMalloc(&dropoutMaskPtr, XSIZE*YSIZE); int prevLayerSize = XSIZE*YSIZE; int thisLayerSize = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); FullyConnectedUpdateMemoryKernel<<<gridBlock,threadBlock>>>(avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { FullyConnectedUpdateMemoryKernel<<<gridBlock,threadBlock>>>(avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { FullyConnectedUpdateMemoryKernel<<<gridBlock,threadBlock>>>(avgWeightGradPtr,avgBiasGradPtr,avgWeightGradVarPtr,avgBiasGradVarPtr,weightMemorySizePtr,biasMemorySizePtr,dropoutMaskPtr,prevLayerSize,thisLayerSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
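The sweep above takes its block shapes from an explicit table and rounds the problem size up to a multiple of the block. When a single reasonable default is wanted instead of a sweep, the occupancy API can suggest a block size; a hedged sketch on a toy kernel (hipOccupancyMaxPotentialBlockSize is the HIP counterpart):

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel(float *a, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) a[i] *= 2.0f;
}

void pick_launch_config(int n)
{
    int minGridSize = 0, blockSize = 0;
    // Ask the runtime for a block size that maximises occupancy for this kernel.
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dummyKernel, 0, 0);
    int gridSize = (n + blockSize - 1) / blockSize;   // enough blocks to cover all n elements
    printf("suggested block %d, grid %d\n", blockSize, gridSize);
    // launch would then be: dummyKernel<<<gridSize, blockSize>>>(d_a, n);
}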
a917d5b04d91fd5bda9d8bf63c18f4cf43c0db45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #define OPTIX_COMPATIBILITY 7 #include <optix.h> #include "optixNVLink.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #include <cuda/random.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // Per ray data, and getting at it // //------------------------------------------------------------------------------ // Per-ray data for radiance rays struct RadiancePRD { float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } // Per-ray data for occlusion rays static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } //------------------------------------------------------------------------------ // // Sampling and color // //------------------------------------------------------------------------------ struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = 
m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); } __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } __forceinline__ __device__ float3 deviceColor( unsigned int idx ) { return make_float3( idx == 0 ? 0.05f : 0.0f, idx == 1 ? 0.05f : 0.0f, idx == 2 ? 0.05f : 0.0f ); } //------------------------------------------------------------------------------ // // Tracing rays // //------------------------------------------------------------------------------ static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } //------------------------------------------------------------------------------ // // Optix Programs // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const uint3 launch_idx = optixGetLaunchIndex(); const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ]; // Work distribution might assign tiles that cross over image boundary if( pixel_idx.x > w-1 || pixel_idx.y > h-1 ) return; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) break; ray_origin = prd.origin; 
ray_direction = prd.direction; ++depth; } } while( --i ); float3 accum_color = result / static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f); const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x; float3 device_color = deviceColor( params.device_idx ) * params.device_color_scale; params.result_buffer[ image_index ] = make_color ( accum_color + device_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); RadiancePRD* prd = getPRD(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; // Compute normal and hit point const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; // Account for emission if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); // Compute attenuation (diffuse color) using texture if available hipTextureObject_t texture = rt_data->diffuse_texture; if (texture != 0) { // get barycentric coordinates const float2 barycentrics = optixGetTriangleBarycentrics(); const float b1 = barycentrics.x; const float b2 = barycentrics.y; const float b0 = 1.0f - (b1 + b2); // compute texture coordinates const int vindex = optixGetPrimitiveIndex() * 3; const float2 t0 = rt_data->tex_coords[ vindex+0 ]; const float2 t1 = rt_data->tex_coords[ vindex+1 ]; const float2 t2 = rt_data->tex_coords[ vindex+2 ]; float2 tex_coord = b0*t0 + b1*t1 + b2*t2; float s = tex_coord.x; float t = tex_coord.y; // sample texture float4 tex_val = tex2D<float4>( rt_data->diffuse_texture, s, t ); prd->attenuation *= make_float3( tex_val ); } else { prd->attenuation *= rt_data->diffuse_color; } unsigned int seed = prd->seed; // Sample a hemisphere direction and place in per-ray data { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->countEmitted = false; } // Sample a position on the light source const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); // Cast the shadow ray float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { 
const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
a917d5b04d91fd5bda9d8bf63c18f4cf43c0db45.cu
// // Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions // are met: // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of NVIDIA CORPORATION nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY // OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // #define OPTIX_COMPATIBILITY 7 #include <optix.h> #include "optixNVLink.h" #include <sutil/vec_math.h> #include <cuda/helpers.h> #include <cuda/random.h> extern "C" { __constant__ Params params; } //------------------------------------------------------------------------------ // // Per ray data, and getting at it // //------------------------------------------------------------------------------ // Per-ray data for radiance rays struct RadiancePRD { float3 emitted; float3 radiance; float3 attenuation; float3 origin; float3 direction; unsigned int seed; int countEmitted; int done; int pad; }; static __forceinline__ __device__ void* unpackPointer( unsigned int i0, unsigned int i1 ) { const unsigned long long uptr = static_cast<unsigned long long>( i0 ) << 32 | i1; void* ptr = reinterpret_cast<void*>( uptr ); return ptr; } static __forceinline__ __device__ void packPointer( void* ptr, unsigned int& i0, unsigned int& i1 ) { const unsigned long long uptr = reinterpret_cast<unsigned long long>( ptr ); i0 = uptr >> 32; i1 = uptr & 0x00000000ffffffff; } static __forceinline__ __device__ RadiancePRD* getPRD() { const unsigned int u0 = optixGetPayload_0(); const unsigned int u1 = optixGetPayload_1(); return reinterpret_cast<RadiancePRD*>( unpackPointer( u0, u1 ) ); } // Per-ray data for occlusion rays static __forceinline__ __device__ void setPayloadOcclusion( bool occluded ) { optixSetPayload_0( static_cast<unsigned int>( occluded ) ); } //------------------------------------------------------------------------------ // // Sampling and color // //------------------------------------------------------------------------------ struct Onb { __forceinline__ __device__ Onb(const float3& normal) { m_normal = normal; if( fabs(m_normal.x) > fabs(m_normal.z) ) { m_binormal.x = -m_normal.y; m_binormal.y = m_normal.x; m_binormal.z = 0; } else { m_binormal.x = 0; m_binormal.y = -m_normal.z; m_binormal.z = m_normal.y; } m_binormal = normalize(m_binormal); m_tangent = cross( m_binormal, m_normal ); 
} __forceinline__ __device__ void inverse_transform(float3& p) const { p = p.x*m_tangent + p.y*m_binormal + p.z*m_normal; } float3 m_tangent; float3 m_binormal; float3 m_normal; }; static __forceinline__ __device__ void cosine_sample_hemisphere(const float u1, const float u2, float3& p) { // Uniformly sample disk. const float r = sqrtf( u1 ); const float phi = 2.0f*M_PIf * u2; p.x = r * cosf( phi ); p.y = r * sinf( phi ); // Project up to hemisphere. p.z = sqrtf( fmaxf( 0.0f, 1.0f - p.x*p.x - p.y*p.y ) ); } __forceinline__ __device__ float3 deviceColor( unsigned int idx ) { return make_float3( idx == 0 ? 0.05f : 0.0f, idx == 1 ? 0.05f : 0.0f, idx == 2 ? 0.05f : 0.0f ); } //------------------------------------------------------------------------------ // // Tracing rays // //------------------------------------------------------------------------------ static __forceinline__ __device__ void traceRadiance( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax, RadiancePRD* prd ) { unsigned int u0, u1; packPointer( prd, u0, u1 ); optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_NONE, RAY_TYPE_RADIANCE, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_RADIANCE, // missSBTIndex u0, u1 ); } static __forceinline__ __device__ bool traceOcclusion( OptixTraversableHandle handle, float3 ray_origin, float3 ray_direction, float tmin, float tmax ) { unsigned int occluded = 0u; optixTrace( handle, ray_origin, ray_direction, tmin, tmax, 0.0f, // rayTime OptixVisibilityMask( 255 ), OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT, RAY_TYPE_OCCLUSION, // SBT offset RAY_TYPE_COUNT, // SBT stride RAY_TYPE_OCCLUSION, // missSBTIndex occluded ); return occluded; } //------------------------------------------------------------------------------ // // Optix Programs // //------------------------------------------------------------------------------ extern "C" __global__ void __raygen__rg() { const int w = params.width; const int h = params.height; const uint3 launch_idx = optixGetLaunchIndex(); const int2 pixel_idx = params.sample_index_buffer[ launch_idx.x ]; // Work distribution might assign tiles that cross over image boundary if( pixel_idx.x > w-1 || pixel_idx.y > h-1 ) return; const float3 eye = params.eye; const float3 U = params.U; const float3 V = params.V; const float3 W = params.W; const int subframe_index = params.subframe_index; unsigned int seed = tea<4>( pixel_idx.y*w + pixel_idx.x, subframe_index ); float3 result = make_float3( 0.0f ); int i = params.samples_per_launch; do { // The center of each pixel is at fraction (0.5,0.5) const float2 subpixel_jitter = make_float2( rnd( seed ), rnd( seed ) ); const float2 d = 2.0f * make_float2( ( static_cast<float>( pixel_idx.x ) + subpixel_jitter.x ) / static_cast<float>( w ), ( static_cast<float>( pixel_idx.y ) + subpixel_jitter.y ) / static_cast<float>( h ) ) - 1.0f; float3 ray_direction = normalize(d.x*U + d.y*V + W); float3 ray_origin = eye; RadiancePRD prd; prd.emitted = make_float3(0.f); prd.radiance = make_float3(0.f); prd.attenuation = make_float3(1.f); prd.countEmitted = true; prd.done = false; prd.seed = seed; int depth = 0; for( ;; ) { traceRadiance( params.handle, ray_origin, ray_direction, 0.01f, // tmin 1e16f, // tmax &prd ); result += prd.emitted; result += prd.radiance * prd.attenuation; if( prd.done || depth >= 3 ) break; ray_origin = prd.origin; ray_direction = prd.direction; ++depth; } } while( --i ); float3 accum_color = result / 
static_cast<float>( params.samples_per_launch ); if( subframe_index > 0 ) { const float a = 1.0f / static_cast<float>( subframe_index+1 ); const float3 accum_color_prev = make_float3( params.sample_accum_buffer[ launch_idx.x ]); accum_color = lerp( accum_color_prev, accum_color, a ); } params.sample_accum_buffer [ launch_idx.x ] = make_float4( accum_color, 1.0f); const unsigned int image_index = pixel_idx.y * params.width + pixel_idx.x; float3 device_color = deviceColor( params.device_idx ) * params.device_color_scale; params.result_buffer[ image_index ] = make_color ( accum_color + device_color ); } extern "C" __global__ void __miss__radiance() { MissData* rt_data = reinterpret_cast<MissData*>( optixGetSbtDataPointer() ); RadiancePRD* prd = getPRD(); prd->radiance = make_float3( rt_data->r, rt_data->g, rt_data->b ); prd->done = true; } extern "C" __global__ void __closesthit__occlusion() { setPayloadOcclusion( true ); } extern "C" __global__ void __closesthit__radiance() { HitGroupData* rt_data = (HitGroupData*)optixGetSbtDataPointer(); RadiancePRD* prd = getPRD(); const int prim_idx = optixGetPrimitiveIndex(); const float3 ray_dir = optixGetWorldRayDirection(); const int vert_idx_offset = prim_idx*3; // Compute normal and hit point const float3 v0 = make_float3( rt_data->vertices[ vert_idx_offset+0 ] ); const float3 v1 = make_float3( rt_data->vertices[ vert_idx_offset+1 ] ); const float3 v2 = make_float3( rt_data->vertices[ vert_idx_offset+2 ] ); const float3 N_0 = normalize( cross( v1-v0, v2-v0 ) ); const float3 N = faceforward( N_0, -ray_dir, N_0 ); const float3 P = optixGetWorldRayOrigin() + optixGetRayTmax()*ray_dir; // Account for emission if( prd->countEmitted ) prd->emitted = rt_data->emission_color; else prd->emitted = make_float3( 0.0f ); // Compute attenuation (diffuse color) using texture if available cudaTextureObject_t texture = rt_data->diffuse_texture; if (texture != 0) { // get barycentric coordinates const float2 barycentrics = optixGetTriangleBarycentrics(); const float b1 = barycentrics.x; const float b2 = barycentrics.y; const float b0 = 1.0f - (b1 + b2); // compute texture coordinates const int vindex = optixGetPrimitiveIndex() * 3; const float2 t0 = rt_data->tex_coords[ vindex+0 ]; const float2 t1 = rt_data->tex_coords[ vindex+1 ]; const float2 t2 = rt_data->tex_coords[ vindex+2 ]; float2 tex_coord = b0*t0 + b1*t1 + b2*t2; float s = tex_coord.x; float t = tex_coord.y; // sample texture float4 tex_val = tex2D<float4>( rt_data->diffuse_texture, s, t ); prd->attenuation *= make_float3( tex_val ); } else { prd->attenuation *= rt_data->diffuse_color; } unsigned int seed = prd->seed; // Sample a hemisphere direction and place in per-ray data { const float z1 = rnd(seed); const float z2 = rnd(seed); float3 w_in; cosine_sample_hemisphere( z1, z2, w_in ); Onb onb( N ); onb.inverse_transform( w_in ); prd->direction = w_in; prd->origin = P; prd->countEmitted = false; } // Sample a position on the light source const float z1 = rnd(seed); const float z2 = rnd(seed); prd->seed = seed; ParallelogramLight light = params.light; const float3 light_pos = light.corner + light.v1 * z1 + light.v2 * z2; // Calculate properties of light sample (for area based pdf) const float Ldist = length(light_pos - P ); const float3 L = normalize(light_pos - P ); const float nDl = dot( N, L ); const float LnDl = -dot( light.normal, L ); // Cast the shadow ray float weight = 0.0f; if( nDl > 0.0f && LnDl > 0.0f ) { const bool occluded = traceOcclusion( params.handle, P, L, 0.01f, // tmin Ldist - 0.01f // 
tmax ); if( !occluded ) { const float A = length(cross(light.v1, light.v2)); weight = nDl * LnDl * A / (M_PIf * Ldist * Ldist); } } prd->radiance += light.emission * weight; }
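The ray generation code above squeezes the 64-bit per-ray-data pointer into the two 32-bit payload registers carried by optixTrace. The pack/unpack pair is easy to sanity-check off-device; a small host-side round-trip check of the same arithmetic (test scaffolding only, not part of the sample):

#include <cassert>
#include <cstdint>

// Same packing scheme as packPointer()/unpackPointer() above.
static void pack(void* ptr, uint32_t& i0, uint32_t& i1)
{
    const uint64_t uptr = reinterpret_cast<uint64_t>(ptr);
    i0 = static_cast<uint32_t>(uptr >> 32);
    i1 = static_cast<uint32_t>(uptr & 0xffffffffull);
}

static void* unpack(uint32_t i0, uint32_t i1)
{
    const uint64_t uptr = (static_cast<uint64_t>(i0) << 32) | i1;
    return reinterpret_cast<void*>(uptr);
}

int main()
{
    int dummy = 0;
    uint32_t hi = 0, lo = 0;
    pack(&dummy, hi, lo);
    assert(unpack(hi, lo) == &dummy);   // the pointer survives the round trip
    return 0;
}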
6422d534f0cf82292f30939653a68e9184d47ef9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <hip/driver_types.h> #include <device_launch_parameters.h> #include "caffe/layers/eltwise_layer.hpp" namespace caffe { template <typename Ftype> __global__ void MaxForward(const int nthreads, const Ftype* bottom_data_a, const Ftype* bottom_data_b, const int blob_idx, Ftype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Ftype maxval = -max_dtype<Ftype>(); int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Ftype, typename Btype> void EltwiseLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { //It is assumed that operations across multiple blobs (such as eltwise) is done in high precision. //so that there won't be loss of precision when we align the quantization ranges of different inputs. this->Quantize_gpu(bottom, top); int* mask = nullptr; const int count = top[0]->count(); // convert to Ftype Ftype* top_data = top[0]->mutable_gpu_data<Ftype>(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data<Ftype>(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: if (no_coeffs_) { for (int i = 1; i < bottom.size(); ++i) { caffe_gpu_incr(count, bottom[i]->gpu_data<Ftype>(), top_data); } } else { caffe_gpu_set(count, Ftype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? 
for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, Ftype(coeffs_[i]), bottom[i]->gpu_data<Ftype>(), top_data); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.mutable_gpu_data(); hipStream_t stream = Caffe::thread_stream(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, stream, count, top_data, bottom[i]->gpu_data<Ftype>(), i - 1, top_data, mask); } CUDA_CHECK(hipStreamSynchronize(stream)); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } this->Quantize_gpu(bottom, top); } template <typename Btype> __global__ void MaxBackward(const int nthreads, const Btype* top_diff, const int blob_idx, const int* mask, Btype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Btype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Ftype, typename Btype> void EltwiseLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { const int* mask = nullptr; const int count = top[0]->count(); const Btype* top_data = top[0]->gpu_data<Btype>(); const Btype* top_diff = top[0]->gpu_diff<Btype>(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Btype* bottom_data = bottom[i]->gpu_data<Btype>(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { Btype *bottom_diff = bottom[i]->mutable_gpu_diff<Btype>(); if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data<Btype>(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data<Btype>(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_SUM: if (!no_coeffs_) { caffe_gpu_scale(count, Btype(coeffs_[i]), top_diff, bottom[i]->mutable_gpu_diff<Btype>()); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxBackward<Btype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, Caffe::thread_stream(), count, top_diff, i, mask, bottom[i]->mutable_gpu_diff<Btype>()); CUDA_CHECK(hipStreamSynchronize(Caffe::thread_stream())); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS_FB(EltwiseLayer); } // namespace caffe
6422d534f0cf82292f30939653a68e9184d47ef9.cu
#include <vector> #include <driver_types.h> #include <device_launch_parameters.h> #include "caffe/layers/eltwise_layer.hpp" namespace caffe { template <typename Ftype> __global__ void MaxForward(const int nthreads, const Ftype* bottom_data_a, const Ftype* bottom_data_b, const int blob_idx, Ftype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Ftype maxval = -max_dtype<Ftype>(); int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } template <typename Ftype, typename Btype> void EltwiseLayer<Ftype, Btype>::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { //It is assumed that operations across multiple blobs (such as eltwise) is done in high precision. //so that there won't be loss of precision when we align the quantization ranges of different inputs. this->Quantize_gpu(bottom, top); int* mask = nullptr; const int count = top[0]->count(); // convert to Ftype Ftype* top_data = top[0]->mutable_gpu_data<Ftype>(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data<Ftype>(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: if (no_coeffs_) { for (int i = 1; i < bottom.size(); ++i) { caffe_gpu_incr(count, bottom[i]->gpu_data<Ftype>(), top_data); } } else { caffe_gpu_set(count, Ftype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? 
for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, Ftype(coeffs_[i]), bottom[i]->gpu_data<Ftype>(), top_data); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.mutable_gpu_data(); cudaStream_t stream = Caffe::thread_stream(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( count, bottom[0]->gpu_data<Ftype>(), bottom[1]->gpu_data<Ftype>(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, stream>>>( count, top_data, bottom[i]->gpu_data<Ftype>(), i - 1, top_data, mask); } CUDA_CHECK(cudaStreamSynchronize(stream)); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } this->Quantize_gpu(bottom, top); } template <typename Btype> __global__ void MaxBackward(const int nthreads, const Btype* top_diff, const int blob_idx, const int* mask, Btype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Btype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } template <typename Ftype, typename Btype> void EltwiseLayer<Ftype, Btype>::Backward_gpu(const vector<Blob*>& top, const vector<bool>& propagate_down, const vector<Blob*>& bottom) { const int* mask = nullptr; const int count = top[0]->count(); const Btype* top_data = top[0]->gpu_data<Btype>(); const Btype* top_diff = top[0]->gpu_diff<Btype>(); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Btype* bottom_data = bottom[i]->gpu_data<Btype>(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { Btype *bottom_diff = bottom[i]->mutable_gpu_diff<Btype>(); if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data<Btype>(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data<Btype>(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_SUM: if (!no_coeffs_) { caffe_gpu_scale(count, Btype(coeffs_[i]), top_diff, bottom[i]->mutable_gpu_diff<Btype>()); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Btype> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS, 0, Caffe::thread_stream()>>>( count, top_diff, i, mask, bottom[i]->mutable_gpu_diff<Btype>()); CUDA_CHECK(cudaStreamSynchronize(Caffe::thread_stream())); break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } } INSTANTIATE_LAYER_GPU_FUNCS_FB(EltwiseLayer); } // namespace caffe
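// --- Illustrative sketch, not part of the Caffe sources above ---
// The MaxForward/MaxBackward kernels above implement an argmax-style
// element-wise maximum: the forward pass records, per element, the index of
// the winning bottom blob in `mask`, and the backward pass routes the top
// gradient only to that winner. The plain CPU reference below mirrors that
// contract for exactly two inputs; the function names are hypothetical and
// the code is a sketch, not part of the layer.
#include <cstddef>
#include <vector>

static void max_forward_ref(const std::vector<float>& a,
                            const std::vector<float>& b,
                            std::vector<float>& top,
                            std::vector<int>& mask)
{
    for (std::size_t i = 0; i < a.size(); ++i) {
        if (a[i] > b[i]) { top[i] = a[i]; mask[i] = 0; }  // blob 0 wins
        else             { top[i] = b[i]; mask[i] = 1; }  // blob 1 wins
    }
}

static void max_backward_ref(const std::vector<float>& top_diff,
                             const std::vector<int>& mask,
                             int blob_idx,
                             std::vector<float>& bottom_diff)
{
    // Only the blob that produced the maximum receives the gradient.
    for (std::size_t i = 0; i < top_diff.size(); ++i)
        bottom_diff[i] = (mask[i] == blob_idx) ? top_diff[i] : 0.0f;
}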
6e3c1a7de6117664f2f3ddbf07ad49d4ce3dcd32.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <iostream> #include <hip/hip_runtime_api.h> //#include <cutil.h> #include <hip/hip_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r129;\n\t" " .reg .s32 %r30;\n\t" " .reg .s32 %r31;\n\t" " .reg .s32 %r32;\n\t" " .reg .s32 %r33;\n\t" " .reg .s32 %r34;\n\t" " .reg .s32 %r35;\n\t" " .reg .s32 %r36;\n\t" " .reg .s32 %r37;\n\t" " .reg .s32 %r38;\n\t" " .reg .s32 %r39;\n\t" " .reg .s32 %r40;\n\t" " .reg .s32 %r41;\n\t" " .reg .s32 %r42;\n\t" " .reg .s32 %r43;\n\t" " .reg .s32 %r44;\n\t" " .reg .s32 %r45;\n\t" " .reg .f64 %r112;\n\t" " .reg .f64 %r113;\n\t" " .reg .f64 %r114;\n\t" " .reg .f64 %r115;\n\t" " .reg .f64 %r116;\n\t" " .reg .f64 %r117;\n\t" " .reg .f64 %r118;\n\t" " .reg .f64 %r119;\n\t" " .reg .f64 %r120;\n\t" " .reg .f64 %r121;\n\t" " .reg .f64 %r122;\n\t" " .reg .f64 %r123;\n\t" " .reg .f64 %r124;\n\t" " .reg .f64 %r125;\n\t" " .reg .f64 %r126;\n\t" " .reg .f64 %r127;\n\t" " .reg .f64 %r128;\n\t" "mov.f64 %r112, 4.4;\n\t" "mov.f64 %r113, %r112;\n\t" "mov.f64 %r114, 2.2;\n\t" "mov.f64 %r115, 3.3;\n\t" "mov.f64 %r116, 1.23;\n\t" "mov.f64 %r117, 2.42;\n\t" "mov.f64 %r118, 3.34;\n\t" "mov.f64 %r119, 5.62;\n\t" "mov.f64 %r120, 2.56;\n\t" "mov.f64 %r121, 1.56;\n\t" "mov.f64 %r122, 2.56;\n\t" "mov.f64 %r123, 5.56;\n\t" "mov.f64 %r124, 8.56;\n\t" "mov.f64 %r125, 3.56;\n\t" "mov.f64 %r126, 5.56;\n\t" "mov.f64 %r127, 6.56;\n\t" "mov.f64 %r128, 5.6;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, 
%r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, 
%r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, 
%r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" ); } } // __syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl; } int main(int argc, char **argv) { if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); // hipMalloc((void**)&d_A, sizeof(float)); // hipMalloc((void**)&d_B, sizeof(float)); // hipMalloc((void**)&d_C, sizeof(float)); hipMalloc((void**)&d_res, sizeof(float)); // hipMemcpy(d_A, h_A, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_B, h_B, sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_C, h_C, sizeof(float), hipMemcpyHostToDevice); hipEvent_t start, stop; float time; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipProfilerStart(); // hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_A, d_B, d_C, d_res, iterations); hipLaunchKernelGGL(( compute), dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_res, iterations, divergence); hipDeviceSynchronize(); hipProfilerStop(); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; hipEventDestroy(start); hipEventDestroy(stop); hipMemcpy(h_res, d_res, sizeof(float), hipMemcpyDeviceToHost); return 0; }
6e3c1a7de6117664f2f3ddbf07ad49d4ce3dcd32.cu
#include <stdio.h> #include <iostream> #include <cuda_profiler_api.h> //#include <cutil.h> #include <cuda_runtime.h> float* h_A; float* h_B; float* h_C; float* h_res; float* d_A; float* d_B; float* d_C; float* d_res; __global__ //void compute(const float* A, const float* B, const float* C, float* D, int n) { void compute(float* D, int n, int div) { int tid = blockDim.x * blockIdx.x + threadIdx.x; float I1 = tid * 2.0; int thread_id = threadIdx.x % 32; if (thread_id < div) { __asm volatile ( " .reg .s32 %r129;\n\t" " .reg .s32 %r30;\n\t" " .reg .s32 %r31;\n\t" " .reg .s32 %r32;\n\t" " .reg .s32 %r33;\n\t" " .reg .s32 %r34;\n\t" " .reg .s32 %r35;\n\t" " .reg .s32 %r36;\n\t" " .reg .s32 %r37;\n\t" " .reg .s32 %r38;\n\t" " .reg .s32 %r39;\n\t" " .reg .s32 %r40;\n\t" " .reg .s32 %r41;\n\t" " .reg .s32 %r42;\n\t" " .reg .s32 %r43;\n\t" " .reg .s32 %r44;\n\t" " .reg .s32 %r45;\n\t" " .reg .f64 %r112;\n\t" " .reg .f64 %r113;\n\t" " .reg .f64 %r114;\n\t" " .reg .f64 %r115;\n\t" " .reg .f64 %r116;\n\t" " .reg .f64 %r117;\n\t" " .reg .f64 %r118;\n\t" " .reg .f64 %r119;\n\t" " .reg .f64 %r120;\n\t" " .reg .f64 %r121;\n\t" " .reg .f64 %r122;\n\t" " .reg .f64 %r123;\n\t" " .reg .f64 %r124;\n\t" " .reg .f64 %r125;\n\t" " .reg .f64 %r126;\n\t" " .reg .f64 %r127;\n\t" " .reg .f64 %r128;\n\t" "mov.f64 %r112, 4.4;\n\t" "mov.f64 %r113, %r112;\n\t" "mov.f64 %r114, 2.2;\n\t" "mov.f64 %r115, 3.3;\n\t" "mov.f64 %r116, 1.23;\n\t" "mov.f64 %r117, 2.42;\n\t" "mov.f64 %r118, 3.34;\n\t" "mov.f64 %r119, 5.62;\n\t" "mov.f64 %r120, 2.56;\n\t" "mov.f64 %r121, 1.56;\n\t" "mov.f64 %r122, 2.56;\n\t" "mov.f64 %r123, 5.56;\n\t" "mov.f64 %r124, 8.56;\n\t" "mov.f64 %r125, 3.56;\n\t" "mov.f64 %r126, 5.56;\n\t" "mov.f64 %r127, 6.56;\n\t" "mov.f64 %r128, 5.6;\n\t" ); for (int k = 0; k < n; k++) { __asm volatile ( "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, 
%r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, 
%r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" "cvt.rni.s32.f64 %r30, %r113;\n\t" "cvt.rm.f64.s32 %r113, %r30;\n\t" "cvt.rni.s32.f64 %r31, %r114;\n\t" "cvt.rm.f64.s32 %r114, %r31;\n\t" "cvt.rni.s32.f64 %r32, %r115;\n\t" "cvt.rm.f64.s32 %r115, %r32;\n\t" "cvt.rni.s32.f64 %r33, %r116;\n\t" "cvt.rm.f64.s32 %r116, %r33;\n\t" "cvt.rni.s32.f64 %r34, %r117;\n\t" "cvt.rm.f64.s32 %r117, %r34;\n\t" "cvt.rni.s32.f64 %r35, %r118;\n\t" "cvt.rm.f64.s32 %r118, %r35;\n\t" "cvt.rni.s32.f64 %r36, %r119;\n\t" "cvt.rm.f64.s32 %r119, %r36;\n\t" "cvt.rni.s32.f64 %r37, %r120;\n\t" "cvt.rm.f64.s32 %r120, %r37;\n\t" "cvt.rni.s32.f64 %r38, %r121;\n\t" "cvt.rm.f64.s32 %r121, %r38;\n\t" "cvt.rni.s32.f64 %r39, %r122;\n\t" "cvt.rm.f64.s32 %r122, %r39;\n\t" "cvt.rni.s32.f64 %r40, %r123;\n\t" "cvt.rm.f64.s32 %r123, %r40;\n\t" "cvt.rni.s32.f64 %r41, %r124;\n\t" "cvt.rm.f64.s32 %r124, %r41;\n\t" "cvt.rni.s32.f64 %r42, %r125;\n\t" "cvt.rm.f64.s32 %r125, %r42;\n\t" "cvt.rni.s32.f64 %r43, %r126;\n\t" "cvt.rm.f64.s32 %r126, %r43;\n\t" "cvt.rni.s32.f64 %r44, %r127;\n\t" "cvt.rm.f64.s32 %r127, %r44;\n\t" "cvt.rni.s32.f64 %r45, %r128;\n\t" "cvt.rm.f64.s32 %r128, %r45;\n\t" ); } } // 
__syncthreads(); // if ((blockDim.x * blockIdx.x + threadIdx.x) == 0) *D = I1; // __syncthreads(); } void usage() { std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations> " "<threads active per warp>" << std::endl; } int main(int argc, char **argv) { if (argc != 5) { usage(); exit(1); } int num_blocks = atoi(argv[1]); int num_threads_per_block = atoi(argv[2]); int iterations = atoi(argv[3]); int divergence = atoi(argv[4]); // h_A = new float(2.0); // h_B = new float(3.0); // h_C = new float(4.0); h_res = new float(0.0); // cudaMalloc((void**)&d_A, sizeof(float)); // cudaMalloc((void**)&d_B, sizeof(float)); // cudaMalloc((void**)&d_C, sizeof(float)); cudaMalloc((void**)&d_res, sizeof(float)); // cudaMemcpy(d_A, h_A, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_B, h_B, sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_C, h_C, sizeof(float), cudaMemcpyHostToDevice); cudaEvent_t start, stop; float time; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); cudaProfilerStart(); // compute<<<num_blocks, num_threads_per_block>>>(d_A, d_B, d_C, d_res, iterations); compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence); cudaDeviceSynchronize(); cudaProfilerStop(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::cout << "GPU Elapsed Time = " << time << std::endl; cudaEventDestroy(start); cudaEventDestroy(stop); cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost); return 0; }
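// --- Illustrative sketch, not part of the benchmark above ---
// The .hip and .cu versions of this benchmark differ essentially in launch
// syntax and API prefixes: hipify rewrites
//   kernel<<<grid, block, shmem, stream>>>(args...)
// as
//   hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args...)
// and maps cuda* runtime, event, and profiler calls onto their hip*
// counterparts. The toy kernel below shows the macro form on a kernel that
// is not part of either file; scaleSketch and launchScaleSketch are
// hypothetical names, and the include assumes a HIP toolchain is available.
#include <hip/hip_runtime.h>

__global__ void scaleSketch(float* data, int n, float s)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) data[i] *= s;
}

static void launchScaleSketch(float* d_data, int n)
{
    dim3 grid((n + 255) / 256);
    dim3 block(256);
    // The equivalent CUDA spelling would be:
    //   scaleSketch<<<grid, block, 0, 0>>>(d_data, n, 2.0f);
    hipLaunchKernelGGL(scaleSketch, grid, block, 0, 0, d_data, n, 2.0f);
    hipDeviceSynchronize();
}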
2337f598a0fe5df27247ed5a93612517fde8fd4d.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <helper_cuda.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 100000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); 
hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
2337f598a0fe5df27247ed5a93612517fde8fd4d.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> #include <helper_cuda.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 100000; size_t size = numElements * sizeof(float); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A float *h_A = (float *)malloc(size); // Allocate the host input vector B float *h_B = (float *)malloc(size); // Allocate the host output vector C float *h_C = (float *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(float)RAND_MAX; h_B[i] = rand()/(float)RAND_MAX; } // Allocate the device input vector A float *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B float *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C float *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, 
d_C, numElements); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); printf("Done\n"); return 0; }
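// --- Illustrative sketch, not part of the vectorAdd sample above ---
// The sample above repeats the same pattern after every runtime call: test
// the cudaError_t, print cudaGetErrorString, and exit on failure. A small
// checking macro, as sketched below, expresses that once; CHECK_CUDA_SKETCH
// is a hypothetical name, not something provided by the CUDA toolkit or by
// the sample's helper headers.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define CHECK_CUDA_SKETCH(call)                                              \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "%s failed at %s:%d: %s\n",                      \
                    #call, __FILE__, __LINE__, cudaGetErrorString(err_));    \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

// Example usage, mirroring the allocations and copies in the sample above:
//   float *d_A = NULL;
//   CHECK_CUDA_SKETCH(cudaMalloc((void **)&d_A, size));
//   CHECK_CUDA_SKETCH(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));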
9b83e2ced6e81651db75c6c887a39aa59e0ef9c5.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for 
matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *)malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **)&d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **)&d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **)&d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, 
stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err / abs_val / dot_length; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(devID); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1); dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
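// --- Illustrative sketch, not part of the matrixMul sample above ---
// The sample reports throughput by counting 2 * wA * hA * wB floating-point
// operations per C = A * B (one multiply and one add per inner-product term)
// and dividing by the average kernel time over nIter launches. The helper
// below restates that arithmetic on its own; matmulGigaFlopsSketch is a
// hypothetical name used only in this sketch.
static double matmulGigaFlopsSketch(double wA, double hA, double wB,
                                    double msecTotal, int nIter)
{
    const double msecPerMatrixMul  = msecTotal / static_cast<double>(nIter);
    const double flopsPerMatrixMul = 2.0 * wA * hA * wB;
    // GFLOP/s = (flops * 1e-9) / seconds
    return (flopsPerMatrixMul * 1.0e-9) / (msecPerMatrixMul / 1000.0);
}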
9b83e2ced6e81651db75c6c887a39aa59e0ef9c5.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Supercomputing (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <stdlib.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; 
unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *)malloc(mem_size_C); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **)&d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **)&d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **)&d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } printf("done\n"); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32> << < grid, threads >> >(d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = cudaEventElapsedTime(&msecTotal, start, stop); if (error != 
cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err / abs_val / dot_length; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(devID); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5 * 2 * block_size, 5 * 2 * block_size, 1); dim3 dimsB(5 * 4 * block_size, 5 * 2 * block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
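// The sample above stages BLOCK_SIZE x BLOCK_SIZE tiles of A and B in shared
// memory. As a hedged correctness baseline, here is a minimal naive kernel
// without tiling; the matrix sizes in main() are illustrative assumptions
// rather than the sample's command-line defaults.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void matrixMulNaive(float *C, const float *A, const float *B,
                               int wA, int wB)
{
    // One thread per output element; wA is A's width (= B's height).
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0.0f;
    for (int k = 0; k < wA; ++k)
        acc += A[row * wA + k] * B[k * wB + col];
    C[row * wB + col] = acc;
}

int main()
{
    // Assumed sizes, exact multiples of the 16x16 block so no bounds check is needed.
    const int wA = 64, hA = 64, wB = 64;
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, wA * hA * sizeof(float));
    cudaMalloc(&d_B, wA * wB * sizeof(float));
    cudaMalloc(&d_C, hA * wB * sizeof(float));
    cudaMemset(d_A, 0, wA * hA * sizeof(float));   // zero inputs -> zero product
    cudaMemset(d_B, 0, wA * wB * sizeof(float));

    dim3 threads(16, 16);
    dim3 grid(wB / threads.x, hA / threads.y);
    matrixMulNaive<<<grid, threads>>>(d_C, d_A, d_B, wA, wB);
    cudaDeviceSynchronize();
    printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));

    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    return 0;
}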
7d1839c393eff90e2ea184bd4531f1a076200e4e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hiprand/hiprand.h"
#include "hiprand/hiprand_kernel.h"
#include <hiprand/hiprand.h>
#include <ctime>
#include <cstdio>
#include <iostream>

using namespace std;

__global__ void addTen(float* d, int count)
{
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    int threadPosInBlock = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z;
    int blockPosInGrid = blockIdx.x + gridDim.x * blockIdx.y + gridDim.x * gridDim.y * blockIdx.z;
    int tid = blockPosInGrid * threadsPerBlock + threadPosInBlock;
    if(tid < count)
    {
        d[tid] = d[tid] + 10;
    }
}

int main()
{
    hiprandGenerator_t gen;
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_MTGP32);
    hiprandSetPseudoRandomGeneratorSeed(gen, time(0));

    hipError_t status;
    const int count = 123456;
    const int size = count * sizeof(float);
    float* d;
    float h[count];
    hipMalloc(&d, size);

    hiprandGenerateUniform(gen, d, count);

    dim3 block(8, 8, 8);
    dim3 grid(16, 16);
    hipLaunchKernelGGL(( addTen), dim3(grid), dim3(block), 0, 0, d, count);

    status = hipMemcpy(h, d, size, hipMemcpyDeviceToHost);
    hipFree(d);

    for (int i = 0; i < 100; i++)
    {
        cout << h[i] << '\t';
    }
    return 0;
}
7d1839c393eff90e2ea184bd4531f1a076200e4e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "curand.h" #include "curand_kernel.h" #include <curand.h> #include <ctime> #include <cstdio> #include <iostream> using namespace std; __global__ void addTen(float* d, int count) { int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z; int threadPosInBlock = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z; int blockPosInGrid = blockIdx.x + gridDim.x * blockIdx.y + gridDim.x * gridDim.y * blockIdx.z; int tid = blockPosInGrid * threadsPerBlock + threadPosInBlock; if(tid < count) { d[tid] = d[tid] + 10; } } int main() { curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32); curandSetPseudoRandomGeneratorSeed(gen, time(0)); cudaError_t status; const int count = 123456; const int size = count * sizeof(float); float* d; float h[count]; cudaMalloc(&d, size); curandGenerateUniform(gen, d, count); dim3 block(8, 8, 8); dim3 grid(16, 16); addTen<<<grid, block>>>(d, count); status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost); cudaFree(d); for (int i = 0; i < 100; i++) { cout << h[i] << '\t'; } return 0; }
792096c993f379eae3de5faea466668b9ca0e5bf.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime_api.h> #include <time.h> /**************************************************************************** * An experiment with cuda kernel invocation parameters. 2x3x4 threads on * one block should yield 24 kernel invocations. * * Compile with: * nvcc -o 2_1_a_3initial 2_1_a_3initial.cu * * Dr Kevan Buckley, University of Wolverhampton, January 2018 *****************************************************************************/ __device__ int is_a_match(char *attempt){ char password1[] ="AA11"; char password2[] ="BB22"; char password3[] ="CC33"; char password4[] ="DD44"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *pass1 = password1; char *pass2 = password2; char *pass3 = password3; char *pass4 = password4; while(*a ==*pass1){ if(*a == '\0') { printf("password:%s\n", password1); break; } a++; pass1++; } while(*b ==*pass2){ if(*b == '\0') { printf("password:%s\n", password2); break; } b++; pass2++; } while(*c ==*pass3){ if(*c == '\0') { printf("password:%s\n", password3); break; } c++; pass3++; } while(*d ==*pass4){ if(*d == '\0') { printf("password: %s\n",password4); return 1; } d++; pass4++; } return 0; } __global__ void kernel(){ char i1, i2; char password[7]; password[6] ='\0'; int i = blockIdx.x +65; int j = threadIdx.x+65; char firstMatch =i; char secondMatch =j; password[0] =firstMatch; password[1] =secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ password[2] =i1; password[3] =i2; if(is_a_match(password)){ } else{ //printf("tried: %s\n",password); } } } } int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); hipLaunchKernelGGL(( kernel), dim3(26),dim3(26), 0, 0, ); hipDeviceSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
792096c993f379eae3de5faea466668b9ca0e5bf.cu
#include <stdio.h> #include <cuda_runtime_api.h> #include <time.h> /**************************************************************************** * An experiment with cuda kernel invocation parameters. 2x3x4 threads on * one block should yield 24 kernel invocations. * * Compile with: * nvcc -o 2_1_a_3initial 2_1_a_3initial.cu * * Dr Kevan Buckley, University of Wolverhampton, January 2018 *****************************************************************************/ __device__ int is_a_match(char *attempt){ char password1[] ="AA11"; char password2[] ="BB22"; char password3[] ="CC33"; char password4[] ="DD44"; char *a = attempt; char *b = attempt; char *c = attempt; char *d = attempt; char *pass1 = password1; char *pass2 = password2; char *pass3 = password3; char *pass4 = password4; while(*a ==*pass1){ if(*a == '\0') { printf("password:%s\n", password1); break; } a++; pass1++; } while(*b ==*pass2){ if(*b == '\0') { printf("password:%s\n", password2); break; } b++; pass2++; } while(*c ==*pass3){ if(*c == '\0') { printf("password:%s\n", password3); break; } c++; pass3++; } while(*d ==*pass4){ if(*d == '\0') { printf("password: %s\n",password4); return 1; } d++; pass4++; } return 0; } __global__ void kernel(){ char i1, i2; char password[7]; password[6] ='\0'; int i = blockIdx.x +65; int j = threadIdx.x+65; char firstMatch =i; char secondMatch =j; password[0] =firstMatch; password[1] =secondMatch; for(i1='0'; i1<='9'; i1++){ for(i2='0'; i2<='9'; i2++){ password[2] =i1; password[3] =i2; if(is_a_match(password)){ } else{ //printf("tried: %s\n",password); } } } } int time_difference(struct timespec *start, struct timespec *finish,long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); kernel<<<26,26>>>(); cudaThreadSynchronize(); clock_gettime(CLOCK_MONOTONIC, &finish); time_difference(&start, &finish, &time_elapsed); printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
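// The kernel above derives password[0] and password[1] from blockIdx.x and
// threadIdx.x (both offset by 65, i.e. 'A'..'Z') and loops the two digits
// on-device. A hedged host-side sketch of the same enumeration, only to make
// the 26*26*10*10 = 67,600 candidate space explicit; the buffer layout here is
// illustrative and not the kernel's own.
#include <cstdio>

int main()
{
    long tried = 0;
    for (int b = 0; b < 26; ++b) {          // plays the role of blockIdx.x
        for (int t = 0; t < 26; ++t) {      // plays the role of threadIdx.x
            char password[5] = { (char)(b + 65), (char)(t + 65), 0, 0, '\0' };
            for (char i1 = '0'; i1 <= '9'; ++i1) {
                for (char i2 = '0'; i2 <= '9'; ++i2) {
                    password[2] = i1;
                    password[3] = i2;
                    ++tried;               // a real check would compare here
                }
            }
        }
    }
    printf("candidates enumerated: %ld\n", tried);  // prints 67600
    return 0;
}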
842d654f21d08d1aeaad9e5490e61a403b9edc20.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_

#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "hip/hip_runtime.h"

__global__ void
bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid)
{
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
    int index_in = HEIGHT * by + ty + 1;

    __shared__ float input_node[HEIGHT];
    __shared__ float weight_matrix[HEIGHT][WIDTH];

    if ( tx == 0 )
        input_node[ty] = input_cuda[index_in] ;
    __syncthreads();

    weight_matrix[ty][tx] = input_hidden_cuda[index];
    __syncthreads();

    weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
    __syncthreads();

    for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
        int power_two = __powf(2, i);
        if( ty % power_two == 0 )
            weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
        __syncthreads();
    }
    __syncthreads();

    if ( tx == 0 ) {
        hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
    }
}

__global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw)
{
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
    int index_y = HEIGHT * by + ty + 1;
    int index_x = tx + 1;
    //eta = 0.3;
    //momentum = 0.3;

    w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
    oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));

    __syncthreads();

    if (ty == 0 && by ==0){
        w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
        oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
    }
}
#endif
842d654f21d08d1aeaad9e5490e61a403b9edc20.cu
#ifndef _BACKPROP_CUDA_KERNEL_H_
#define _BACKPROP_CUDA_KERNEL_H_

#include <stdio.h>
#include "backprop.h"
#include "math.h"
#include "cuda.h"

__global__ void
bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid)
{
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
    int index_in = HEIGHT * by + ty + 1;

    __shared__ float input_node[HEIGHT];
    __shared__ float weight_matrix[HEIGHT][WIDTH];

    if ( tx == 0 )
        input_node[ty] = input_cuda[index_in] ;
    __syncthreads();

    weight_matrix[ty][tx] = input_hidden_cuda[index];
    __syncthreads();

    weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
    __syncthreads();

    for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
        int power_two = __powf(2, i);
        if( ty % power_two == 0 )
            weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
        __syncthreads();
    }
    __syncthreads();

    if ( tx == 0 ) {
        hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
    }
}

__global__ void bpnn_adjust_weights_cuda(float * delta, int hid, float * ly, int in, float * w, float * oldw)
{
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
    int index_y = HEIGHT * by + ty + 1;
    int index_x = tx + 1;
    //eta = 0.3;
    //momentum = 0.3;

    w[index] += ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));
    oldw[index] = ((ETA * delta[index_x] * ly[index_y]) + (MOMENTUM * oldw[index]));

    __syncthreads();

    if (ty == 0 && by ==0){
        w[index_x] += ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
        oldw[index_x] = ((ETA * delta[index_x]) + (MOMENTUM * oldw[index_x]));
    }
}
#endif
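// bpnn_layerforward_CUDA above folds the per-thread products with a
// power-of-two tree reduction over ty. A minimal standalone sketch of that
// folding pattern in one dimension; N = 16 is an assumed, illustrative block
// size and must be a power of two.
#include <cstdio>
#include <cuda_runtime.h>

#define N 16  // assumed block size (power of two)

__global__ void tree_reduce(const float *in, float *out)
{
    __shared__ float buf[N];
    int t = threadIdx.x;
    buf[t] = in[blockIdx.x * N + t];
    __syncthreads();
    // Same folding scheme as the backprop kernel: at each step, threads whose
    // index is a multiple of the stride add in the value stride/2 away.
    for (int stride = 2; stride <= N; stride *= 2) {
        if (t % stride == 0)
            buf[t] += buf[t + stride / 2];
        __syncthreads();
    }
    if (t == 0)
        out[blockIdx.x] = buf[0];
}

int main()
{
    float h_in[N], h_out = 0.0f;
    for (int i = 0; i < N; ++i) h_in[i] = 1.0f;  // expected sum: 16
    float *d_in, *d_out;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    tree_reduce<<<1, N>>>(d_in, d_out);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}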
4f1fd552f2e6fd36db8ce62aaf49d935261190e5.hip
// !!! This is a file automatically generated by hipify!!!
// This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/hip/mem_eff_attention/kernel_backward.h>

INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::half_t, false);
4f1fd552f2e6fd36db8ce62aaf49d935261190e5.cu
// This file is auto-generated. See "generate_kernels.sh"
#include <ATen/native/transformers/cuda/mem_eff_attention/kernel_backward.h>

INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM50(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM70(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM75(cutlass::half_t, false);
INSTANTIATE_ATTENTION_KERNEL_BACKWARD_SM80(cutlass::half_t, false);
798d51a61e59993c1ef884b2daa522ff19f4ea60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * \file dnn/src/cuda/repeat/repeat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/repeat/repeat.cuh" #include "src/cuda/utils.cuh" #include <numeric> #include <functional> #include <stdint.h> #include "megdnn/dtype.h" namespace megdnn { namespace cuda { namespace repeat { template <typename T> __global__ void forward_kernel_1d(const T *src, T *dst, uint32_t sshape, uint32_t dshape, uint32_t tshape) { uint32_t di = threadIdx.x + blockIdx.x * blockDim.x; uint32_t si = di / tshape; if (di < dshape) { dst[di] = src[si]; } } template <typename T> void forward_proxy_1d(const T *src, T *dst, size_t sshape, size_t dshape, size_t tshape, hipStream_t stream) { size_t NR_BLOCKS = DIVUP(dshape, NR_THREADS); hipLaunchKernelGGL(( forward_kernel_1d<T>), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream, src, dst, sshape, dshape, tshape); } template <typename T> __global__ void forward_kernel_2d(const T *src, T *dst, uint32_t sshape0, uint32_t sshape1, uint32_t dshape0, uint32_t dshape1, uint32_t tshape0, uint32_t tshape1) { uint32_t dix = threadIdx.x + blockIdx.x * blockDim.x; uint32_t diy = threadIdx.y + blockIdx.y * blockDim.y; uint32_t six = dix / tshape0; uint32_t siy = diy / tshape1; uint32_t diz = diy * dshape0 + dix; uint32_t siz = siy * sshape0 + six; if (dix < dshape0 && diy < dshape1) { dst[diz] = src[siz]; } } template <typename T> void forward_proxy_2d(const T *src, T *dst, size_t sshape0, size_t sshape1, size_t dshape0, size_t dshape1, size_t tshape0, size_t tshape1, hipStream_t stream) { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(dshape0, threads.x), DIVUP(dshape1, threads.y)); hipLaunchKernelGGL(( forward_kernel_2d<T>), dim3(blocks), dim3(threads), 0, stream, src, dst, sshape0, sshape1, dshape0, dshape1, tshape0, tshape1); } template <typename T, uint32_t ndim> __global__ void forward_kernel_generic_tpl(const T * __restrict__ src, T * __restrict__ dst, uint32_t n, array_wrapper<uint32_t, ndim> sshape, array_wrapper<uint32_t, ndim> dshape, array_wrapper<uint32_t, ndim> tshape) { uint32_t tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < n) { uint32_t didx = tidx; uint32_t sidx = 0; uint32_t base = 1; // calculate index #pragma unroll for (size_t i = ndim; i > 0; --i) { size_t cidx = didx % dshape.data[i-1] / tshape.data[i-1]; sidx += cidx * base; base *= sshape.data[i-1]; didx /= dshape.data[i-1]; } dst[tidx] = src[sidx]; } } template <typename T, size_t ndim> void forward_proxy_generic_tpl(const T *src, T *dst, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, hipStream_t stream) { array_wrapper<uint32_t, ndim> sshape, dshape, tshape; for (size_t i = 0; i < ndim; ++i) sshape.data[i] = sshape_[i]; for (size_t i = 0; i < ndim; ++i) dshape.data[i] = dshape_[i]; for (size_t i = 0; i < ndim; ++i) tshape.data[i] = tshape_[i]; size_t n = std::accumulate(dshape_, dshape_ + ndim, size_t(1), std::multiplies<size_t>()); size_t NR_BLOCKS = DIVUP(n, NR_THREADS); hipLaunchKernelGGL(( forward_kernel_generic_tpl<T, ndim>), dim3(NR_BLOCKS), dim3(NR_THREADS), 0, stream, src, dst, n, sshape, dshape, tshape); } template <typename T> void 
forward_proxy_generic(const T *src, T *dst, size_t ndim, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, hipStream_t stream) { #define CASE(ndim) \ case ndim: \ forward_proxy_generic_tpl<T, ndim>(src, dst, \ sshape_, dshape_, tshape_, stream); \ break; switch (ndim) { CASE(2); CASE(3); CASE(4); CASE(5); CASE(6); default: megdnn_assert_internal(false); } #undef CASE } template <typename T> void forward_proxy(const T *src, T *dst, size_t ndim, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, hipStream_t stream) { if (ndim == 1) { forward_proxy_1d<T>(src, dst, sshape_[0], dshape_[0], tshape_[0], stream); } else if (ndim == 2 && dshape_[0] <= 65535 * NR_THREADS_Y) { // CUDA can launch 65535 blocks along axis Y at most. // Note that the index 1 and 0 are swapped, it is because in the kernel, // index zero corresponds to axis X (which is the stride=1 axis), // and index one corresponds to axis Y. However, outside the kernel, // our representation is the opposite. forward_proxy_2d<T>(src, dst, sshape_[1], sshape_[0], dshape_[1], dshape_[0], tshape_[1], tshape_[0], stream); } else { forward_proxy_generic<T>(src, dst, ndim, sshape_, dshape_, tshape_, stream); } after_kernel_launch(); } #define INST(T) \ template void forward_proxy<T>(const T *src, T *dst, size_t ndim, \ const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, \ hipStream_t stream); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace repeat } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
798d51a61e59993c1ef884b2daa522ff19f4ea60.cu
/** * \file dnn/src/cuda/repeat/repeat.cu * MegEngine is Licensed under the Apache License, Version 2.0 (the "License") * * Copyright (c) 2014-2020 Megvii Inc. All rights reserved. * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */ #include "src/cuda/repeat/repeat.cuh" #include "src/cuda/utils.cuh" #include <numeric> #include <functional> #include <stdint.h> #include "megdnn/dtype.h" namespace megdnn { namespace cuda { namespace repeat { template <typename T> __global__ void forward_kernel_1d(const T *src, T *dst, uint32_t sshape, uint32_t dshape, uint32_t tshape) { uint32_t di = threadIdx.x + blockIdx.x * blockDim.x; uint32_t si = di / tshape; if (di < dshape) { dst[di] = src[si]; } } template <typename T> void forward_proxy_1d(const T *src, T *dst, size_t sshape, size_t dshape, size_t tshape, cudaStream_t stream) { size_t NR_BLOCKS = DIVUP(dshape, NR_THREADS); forward_kernel_1d<T><<<NR_BLOCKS, NR_THREADS, 0, stream>>>(src, dst, sshape, dshape, tshape); } template <typename T> __global__ void forward_kernel_2d(const T *src, T *dst, uint32_t sshape0, uint32_t sshape1, uint32_t dshape0, uint32_t dshape1, uint32_t tshape0, uint32_t tshape1) { uint32_t dix = threadIdx.x + blockIdx.x * blockDim.x; uint32_t diy = threadIdx.y + blockIdx.y * blockDim.y; uint32_t six = dix / tshape0; uint32_t siy = diy / tshape1; uint32_t diz = diy * dshape0 + dix; uint32_t siz = siy * sshape0 + six; if (dix < dshape0 && diy < dshape1) { dst[diz] = src[siz]; } } template <typename T> void forward_proxy_2d(const T *src, T *dst, size_t sshape0, size_t sshape1, size_t dshape0, size_t dshape1, size_t tshape0, size_t tshape1, cudaStream_t stream) { dim3 threads(NR_THREADS_X, NR_THREADS_Y); dim3 blocks(DIVUP(dshape0, threads.x), DIVUP(dshape1, threads.y)); forward_kernel_2d<T><<<blocks, threads, 0, stream>>>(src, dst, sshape0, sshape1, dshape0, dshape1, tshape0, tshape1); } template <typename T, uint32_t ndim> __global__ void forward_kernel_generic_tpl(const T * __restrict__ src, T * __restrict__ dst, uint32_t n, array_wrapper<uint32_t, ndim> sshape, array_wrapper<uint32_t, ndim> dshape, array_wrapper<uint32_t, ndim> tshape) { uint32_t tidx = threadIdx.x + blockIdx.x * blockDim.x; if (tidx < n) { uint32_t didx = tidx; uint32_t sidx = 0; uint32_t base = 1; // calculate index #pragma unroll for (size_t i = ndim; i > 0; --i) { size_t cidx = didx % dshape.data[i-1] / tshape.data[i-1]; sidx += cidx * base; base *= sshape.data[i-1]; didx /= dshape.data[i-1]; } dst[tidx] = src[sidx]; } } template <typename T, size_t ndim> void forward_proxy_generic_tpl(const T *src, T *dst, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, cudaStream_t stream) { array_wrapper<uint32_t, ndim> sshape, dshape, tshape; for (size_t i = 0; i < ndim; ++i) sshape.data[i] = sshape_[i]; for (size_t i = 0; i < ndim; ++i) dshape.data[i] = dshape_[i]; for (size_t i = 0; i < ndim; ++i) tshape.data[i] = tshape_[i]; size_t n = std::accumulate(dshape_, dshape_ + ndim, size_t(1), std::multiplies<size_t>()); size_t NR_BLOCKS = DIVUP(n, NR_THREADS); forward_kernel_generic_tpl<T, ndim><<<NR_BLOCKS, NR_THREADS, 0, stream>>>( src, dst, n, sshape, dshape, tshape); } template <typename T> void forward_proxy_generic(const T *src, T *dst, size_t ndim, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, cudaStream_t stream) { #define CASE(ndim) \ case ndim: \ 
forward_proxy_generic_tpl<T, ndim>(src, dst, \ sshape_, dshape_, tshape_, stream); \ break; switch (ndim) { CASE(2); CASE(3); CASE(4); CASE(5); CASE(6); default: megdnn_assert_internal(false); } #undef CASE } template <typename T> void forward_proxy(const T *src, T *dst, size_t ndim, const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, cudaStream_t stream) { if (ndim == 1) { forward_proxy_1d<T>(src, dst, sshape_[0], dshape_[0], tshape_[0], stream); } else if (ndim == 2 && dshape_[0] <= 65535 * NR_THREADS_Y) { // CUDA can launch 65535 blocks along axis Y at most. // Note that the index 1 and 0 are swapped, it is because in the kernel, // index zero corresponds to axis X (which is the stride=1 axis), // and index one corresponds to axis Y. However, outside the kernel, // our representation is the opposite. forward_proxy_2d<T>(src, dst, sshape_[1], sshape_[0], dshape_[1], dshape_[0], tshape_[1], tshape_[0], stream); } else { forward_proxy_generic<T>(src, dst, ndim, sshape_, dshape_, tshape_, stream); } after_kernel_launch(); } #define INST(T) \ template void forward_proxy<T>(const T *src, T *dst, size_t ndim, \ const size_t *sshape_, const size_t *dshape_, const size_t *tshape_, \ cudaStream_t stream); #define cb(DType) INST(typename DTypeTrait<DType>::ctype) MEGDNN_FOREACH_COMPUTING_DTYPE(cb) #undef cb #undef INST } // namespace repeat } // namespace cuda } // namespace megdnn // vim: syntax=cpp.doxygen
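// The generic-ndim kernel above recovers the source index by peeling one
// destination coordinate per dimension and dividing it by that axis's repeat
// count. A host-side sketch of the same arithmetic for an assumed 2D case;
// the shapes and values below are chosen purely for illustration.
#include <cstdio>
#include <cstdint>
#include <vector>

int main()
{
    const int ndim = 2;
    uint32_t sshape[ndim] = {2, 3};   // source shape
    uint32_t tshape[ndim] = {2, 2};   // repeat counts per axis
    uint32_t dshape[ndim] = {4, 6};   // destination shape = sshape * tshape

    std::vector<int> src = {1, 2, 3, 4, 5, 6};          // 2x3, row-major
    std::vector<int> dst(dshape[0] * dshape[1]);

    for (uint32_t tidx = 0; tidx < dst.size(); ++tidx) {
        uint32_t didx = tidx, sidx = 0, base = 1;
        // Walk dimensions from innermost to outermost, exactly as the kernel does.
        for (int i = ndim; i > 0; --i) {
            uint32_t cidx = didx % dshape[i - 1] / tshape[i - 1];
            sidx += cidx * base;
            base *= sshape[i - 1];
            didx /= dshape[i - 1];
        }
        dst[tidx] = src[sidx];
    }

    // Each source element ends up expanded into a 2x2 block of the output.
    for (uint32_t r = 0; r < dshape[0]; ++r) {
        for (uint32_t c = 0; c < dshape[1]; ++c)
            printf("%d ", dst[r * dshape[1] + c]);
        printf("\n");
    }
    return 0;
}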
b579eef06cc1954b44e64f72476790c8def52f25.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> #include <cassert> #include <chrono> #include <fmt/format.h> typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; struct ForcerContext { u8 rng_state[4]; u8 work_buffer[8*8]; u8 decompress_buffer[0x30]; u8 preloaded_map[8*8*9]; u8* found_byte; }; static const unsigned seed_count = 0xfffffff + 1; static const unsigned block_count = 32; static const unsigned threads_per_block = 1024; static const unsigned core_count = block_count * threads_per_block; static const unsigned seeds_per_thread = seed_count / core_count; static const unsigned context_size = sizeof(ForcerContext) * core_count; __device__ u8 rng_next(ForcerContext* ctx) { if(!ctx) return 0x0; ctx->rng_state[0]++; ctx->rng_state[1] = ctx->rng_state[3] ^ ctx->rng_state[0] ^ ctx->rng_state[1]; ctx->rng_state[2] = ctx->rng_state[1] + ctx->rng_state[2]; ctx->rng_state[3] = ctx->rng_state[3] + ((ctx->rng_state[2]>>1) ^ ctx->rng_state[1]); return ctx->rng_state[3]; } __device__ void rng_reinitialize(ForcerContext* ctx, u32 seed) { if(!ctx) return; ctx->rng_state[3] = seed & 0xffu; ctx->rng_state[2] = (seed >> 8u) & 0xffu; ctx->rng_state[1] = (seed >> 16u) & 0xffu; ctx->rng_state[0] = (seed >> 24u) & 0xffu; for(unsigned i = 0; i < 0x10; ++i) rng_next(ctx); } __device__ u8 gen_ctl(ForcerContext* ctx, u32 seed, u16 x, u16 y) { if(!ctx) return 0x0; u8 masked_x = x & 3, masked_y = y & 3; u32 mask = ((u32)(y & 0xFFFC) << 16u) | (x & 0xFFFC); rng_reinitialize(ctx, seed ^ mask); u8 b1 = rng_next(ctx) & 7; static const u8 dd0c_lookup[128] = { 0x05,0x0B,0x06,0x00,0x0E,0x05,0x0B,0x03,0x09,0x0E,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x0C,0x05,0x06,0x06,0x0D,0x0A,0x09,0x09,0x0B,0x06,0x00,0x00,0x05,0x0A,0x00,0x05,0x0B,0x06,0x00,0x0B,0x06,0x09,0x07,0x00,0x09,0x07,0x0A,0x00,0x05,0x0A,0x00,0x00,0x09,0x06,0x00,0x03,0x06,0x0C,0x05,0x00,0x09,0x0F,0x0A,0x00,0x05,0x0A,0x00,0x05,0x0A,0x00,0x00,0x0F,0x06,0x05,0x03,0x09,0x0F,0x0A,0x00,0x00,0x0C,0x00,0x00,0x05,0x0B,0x03,0x06,0x0E,0x00,0x00,0x0D,0x09,0x06,0x00,0x0C,0x00,0x0D,0x03,0x0A,0x00,0x09,0x06,0x00,0x06,0x05,0x0A,0x05,0x09,0x0F,0x06,0x0C,0x00,0x0C,0x09,0x0A,0x00,0x0C,0x00,0x00,0x03,0x0F,0x07,0x03,0x00,0x0D,0x0A,0x00,0x00,0x0C,0x00,0x00 }; auto offset = 4 * masked_y + masked_x; auto table_offset = offset + (b1 << 4u); u8 lookup_value = dd0c_lookup[table_offset]; u8 b2 = rng_next(ctx) & 0x30; b2 |= lookup_value; auto result = (((x&0xFF) | (y&0xFF)) & 0xFC) | ((x>>8u) | (y>>8u)); if(result != 0) return b2; else return b2 & 0x0F; } __device__ void map_decompress(ForcerContext* ctx, u8 new_tile) { if(!ctx) return; // Copy tiles to not trample over things while modifying memcpy(&ctx->decompress_buffer[0], &ctx->work_buffer[8], 0x30); for(unsigned i = 8; i < 8*7; ++i) { if(ctx->decompress_buffer[i - 8] != new_tile) continue; if(((i&7) == 0) || ((i&7) == 7)) continue; u8 v = rng_next(ctx); if(v & 1) { ctx->work_buffer[i-1] = new_tile; } if(v & 2) { ctx->work_buffer[i+1] = new_tile; } if(v & 4) { ctx->work_buffer[i-8] = new_tile; } if(v & 8) { ctx->work_buffer[i+8] = new_tile; } } } __device__ void map_place_tile_prob(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold) { if(!ctx) return; for(unsigned i = 0; i < 8*8; ++i) { if(ctx->work_buffer[i] != old) continue; u8 v = rng_next(ctx); if(v >= threshold) continue; ctx->work_buffer[i] = new_tile; } } // Same as above, but skips lines 0 and 7 and tiles 0,7 on each line // presumably to avoid softlocking __device__ void 
map_place_tile_prob_safe(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold) { if(!ctx) return; for(unsigned i = 8; i < 8*7; ++i) { if((i&7) == 0 || (i&7) == 7) continue; if(ctx->work_buffer[i] != old) continue; u8 v = rng_next(ctx); if(v >= threshold) continue; ctx->work_buffer[i] = new_tile; } } __device__ void map_place_tile_at(ForcerContext* ctx, u8 tile, u8 x, u8 y) { x = x & 0x0f; y = y & 0x0f; ctx->work_buffer[y * 8 + x] = tile; } __device__ void map_place_line_impl(ForcerContext* ctx, u8 tile, u8 pos1, u8 pos2) { if(!ctx) return; u8 x1 = (pos1 >> 4u) & 0xf, x2 = (pos2 >> 4u) & 0xf, y1 = pos1 & 0x0f, y2 = pos2 & 0x0f; u8 ystep = (y1 < y2) ? 0x01 : 0xFF, xstep = (x1 < x2) ? 0x01 : 0xFF; while((x1 != x2) || (y1 != y2)) { map_place_tile_at(ctx, tile, x1, y1); if(x1 != x2) x1 += xstep; map_place_tile_at(ctx, tile, x1, y1); if(y1 != y2) y1 += ystep; map_place_tile_at(ctx, tile, x1, y1); } } __device__ void map_place_line(ForcerContext* ctx, u8 tile, u8 start, u8 end) { if(!ctx) return; u8 x = rng_next(ctx) & 7; while(x == 0 || x == 7) x = rng_next(ctx) & 7; u8 y = rng_next(ctx) & 7; while(y == 0 || y == 7) y = rng_next(ctx) & 7; u8 point_pos = (x << 4u) | y; map_place_line_impl(ctx, tile, start, point_pos); map_place_line_impl(ctx, tile, point_pos, end); } __device__ void maybe_map_gen(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold, u8 hfindpathflags, u8 hmultiplier, u8 hfindpathxprogress, u8 hmultiplybuffer) { if(!ctx) return; // b - old tile // c - new tile // d - threshold // ???? - findpathflags // ???? - multiplier // ???? - findpathxprogress // ???? - multiplybuffer // tile - hmutatewx unsigned i = 8, c = 0x30; while(c > 0) { if((i&7) == 0 || (i&7) == 7) { i++; c--; continue; } if(ctx->work_buffer[i] != old) { i++; c--; continue; } u8 v = rng_next(ctx); if(v < threshold) { i++; c--; continue; } i -= 0x8; if(hfindpathflags != 0) { if(ctx->work_buffer[i] != hfindpathflags) { i += 0x9; c--; continue; } } i += 0x10; if(hmultiplier != 0) { if(ctx->work_buffer[i] != hmultiplier) { i -= 0x7; c--; continue; } } i -= 0x09; if(hfindpathxprogress != 0) { if(ctx->work_buffer[i] != hfindpathxprogress) { i += 2; c--; continue; } } i += 2; if(hmultiplybuffer != 0) { if(ctx->work_buffer[i] != hmultiplybuffer) { c--; continue; } } ctx->work_buffer[i-1] = new_tile; c--; } } __device__ void gen_map_data(ForcerContext* ctx, u32 seed, u16 x, u16 y) { if(!ctx) return; //fmt::print("Generating map[{},{}] with seed={:08x}\n", x, y, seed); // Generate control byte auto ctl = gen_ctl(ctx, seed, x,y); // Fill work buffer with 0F memset(&ctx->work_buffer[0], 0x0F, 8 * 8); u8 var1 = 0; if(ctl & 1) var1 = 0x74; if(ctl & 2) var1 = 0x04; if(ctl & 4) var1 = 0x47; if(ctl & 8) var1 = 0x40; // Reinitialize RNG u32 seed_mask = ((u32)x << 16u) | y; rng_reinitialize(ctx, seed ^ seed_mask); // World gen? if(ctl & 1) map_place_line(ctx, 0xA, var1, 0x74); if(ctl & 2) map_place_line(ctx, 0xA, var1, 0x04); if(ctl & 4) map_place_line(ctx, 0xA, var1, 0x47); if(ctl & 8) map_place_line(ctx, 0xA, var1, 0x40); // "Decompression"? 
map_decompress(ctx, 0x0A); // Place exits if(ctl & 8) { map_place_tile_at(ctx, 0xa, 0x3, 0x0); map_place_tile_at(ctx, 0xa, 0x4, 0x0); } if(ctl & 4) { map_place_tile_at(ctx, 0xa, 0x3, 0x7); map_place_tile_at(ctx, 0xa, 0x4, 0x7); } if(ctl & 2) { map_place_tile_at(ctx, 0xa, 0x0, 0x3); map_place_tile_at(ctx, 0xa, 0x0, 0x4); } if(ctl & 1) { map_place_tile_at(ctx, 0xa, 0x7, 0x3); map_place_tile_at(ctx, 0xa, 0x7, 0x4); } // Biome specific generation auto biome_ctl = (ctl >> 4u) & 0x3; switch(biome_ctl) { case 0: { map_place_tile_prob(ctx, 0xa, 0xb, 0x30); map_decompress(ctx, 0x0B); maybe_map_gen(ctx, 0x0f, 0x6c, 0x20, 0x0f, 0x0a, 0x0, 0x0); maybe_map_gen(ctx, 0x0f, 0x6f, 0x20, 0x0a, 0x0f, 0x0, 0x0); maybe_map_gen(ctx, 0x0f, 0x6e, 0x20, 0x0, 0x0, 0x0a, 0x0f); maybe_map_gen(ctx, 0x0f, 0x6d, 0x20, 0x0, 0x0, 0x0f, 0x0a); map_place_tile_prob(ctx, 0xa, 0x74, 0x30); map_place_tile_prob(ctx, 0xa, 0x7a, 0x30); map_place_tile_prob_safe(ctx, 0x6c, 0x33, 0x40); map_place_tile_prob_safe(ctx, 0x6d, 0x32, 0x40); map_place_tile_prob_safe(ctx, 0x6e, 0x60, 0x40); map_place_tile_prob_safe(ctx, 0x6f, 0x34, 0x40); break; } case 1: { map_place_tile_prob(ctx, 0xa, 0x7b, 0x40); map_place_tile_prob(ctx, 0xa, 0x7a, 0x30); map_place_tile_prob(ctx, 0xa, 0xb, 0xd0); map_place_tile_prob_safe(ctx, 0x0a, 0x08, 0x20); break; } case 2: { // Screw this assert(false); break; } case 3: { // Screw this assert(false); break; } default: break; } } __device__ void generate_visible_map(ForcerContext* ctx, u32 seed) { if(!ctx) return; auto get_base = [](u8 x, u8 y) { if(y == 0) { return 0x0 + x*8; } else if(y == 1) { return 0xC0 + x*8; } else { return 0x180 + x*8; } }; for(unsigned x = 0; x < 3; ++x) { for(unsigned y = 0; y < 3; ++y) { gen_map_data(ctx, seed, x, y+1); auto base = get_base(x,y); // Copy lines such that the lines in nearby chunks are sequential in memory for(unsigned i = 0; i < 8; ++i) memcpy(&ctx->preloaded_map[base + i * 0x18], &ctx->work_buffer[i * 8], 8); } } } __device__ bool search_sequence(ForcerContext* ctx, unsigned windowx, unsigned windowy) { if(!ctx) return false; if(windowx >= 24 || windowx + 5 >= 24 || windowy >= 24 || windowy + 5 >= 24) return false; const u8 sequence[25] = { 0x0b,0x0b,0x0b,0x74,0x0a, 0x0f,0x0b,0x0f,0x0a,0x0a, 0x0f,0x0f,0x0a,0x0a,0x0b, 0x0f,0x0b,0x0a,0x0a,0x0a, 0x0b,0x0b,0x0a,0x0a,0x74 }; unsigned c = 0; for(unsigned y = windowy; y < windowy + 5; ++y) { for(unsigned x = windowx; x < windowx + 5; ++x) { auto addr = y * 24 + x; if(ctx->preloaded_map[addr] != sequence[c]) return false; c++; if(c == 25) return true; } } return false; } __global__ void forcer_entrypoint(void* context_pool_base, u32 cycle_base) { if(!context_pool_base) return; auto threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y; auto blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y; auto threadsPerBlock = blockDim.x * blockDim.y; auto thread_number = blockNumInGrid * threadsPerBlock + threadNumInBlock; auto* ctx = (ForcerContext*)((u8*)context_pool_base + thread_number * sizeof(ForcerContext)); u32 input_seed = cycle_base + thread_number; auto seed = (input_seed << 4u) | 0x01u; generate_visible_map(ctx, seed); // Search the possible window for the sequence for(unsigned x = 6; x <= 13; ++x) { for(unsigned y = 6; y <= 13; ++y) { bool res = search_sequence(ctx, x,y); if(res) { printf("[Thread %d] SEED=%08x Found pattern occurence!\n", thread_number, seed); return; } } } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { 
if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } int main() { fmt::print("Bruteforce using {} blocks, {} threads per block, total {} CUDA threads\n", block_count, threads_per_block, core_count); fmt::print("Seeds per CUDA thread: {}\n", seeds_per_thread); fmt::print("Forcer context size: {} bytes\n", context_size); void* alloc_base; gpuErrchk(hipMalloc((void**)&alloc_base, context_size)); unsigned rounds = seed_count / core_count; fmt::print("Rounds: {}\n", rounds); auto force_start = std::chrono::high_resolution_clock::now(); auto start = force_start; for(unsigned i = 0; i < rounds; i++) { u32 seed_base = i * core_count; hipLaunchKernelGGL(( forcer_entrypoint), dim3(block_count), dim3(threads_per_block), 0, 0, alloc_base, seed_base); gpuErrchk(hipPeekAtLastError()); gpuErrchk(hipDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(); auto eta = ((rounds - i) * duration) / 1000; auto eta_mins = eta / 60; auto eta_secs = eta % 60; if((i % 8) == 0) { const auto since_start = std::chrono::duration_cast<std::chrono::seconds>(end - force_start).count(); const auto sps = (since_start == 0) ? 0 : ((i+1)*core_count) / since_start; fmt::print("Progress: round {}/{}, seeds: {:07x}x-{:07x}x [{}%], eta={}m:{}s, {} seeds/s\n", i, rounds, seed_base, (seed_base + core_count), 100.0 * seed_base / seed_count, eta_mins, eta_secs, sps); } start = end; } hipFree(alloc_base); return 0; }
b579eef06cc1954b44e64f72476790c8def52f25.cu
#include <cstdint> #include <cassert> #include <chrono> #include <fmt/format.h> typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; struct ForcerContext { u8 rng_state[4]; u8 work_buffer[8*8]; u8 decompress_buffer[0x30]; u8 preloaded_map[8*8*9]; u8* found_byte; }; static const unsigned seed_count = 0xfffffff + 1; static const unsigned block_count = 32; static const unsigned threads_per_block = 1024; static const unsigned core_count = block_count * threads_per_block; static const unsigned seeds_per_thread = seed_count / core_count; static const unsigned context_size = sizeof(ForcerContext) * core_count; __device__ u8 rng_next(ForcerContext* ctx) { if(!ctx) return 0x0; ctx->rng_state[0]++; ctx->rng_state[1] = ctx->rng_state[3] ^ ctx->rng_state[0] ^ ctx->rng_state[1]; ctx->rng_state[2] = ctx->rng_state[1] + ctx->rng_state[2]; ctx->rng_state[3] = ctx->rng_state[3] + ((ctx->rng_state[2]>>1) ^ ctx->rng_state[1]); return ctx->rng_state[3]; } __device__ void rng_reinitialize(ForcerContext* ctx, u32 seed) { if(!ctx) return; ctx->rng_state[3] = seed & 0xffu; ctx->rng_state[2] = (seed >> 8u) & 0xffu; ctx->rng_state[1] = (seed >> 16u) & 0xffu; ctx->rng_state[0] = (seed >> 24u) & 0xffu; for(unsigned i = 0; i < 0x10; ++i) rng_next(ctx); } __device__ u8 gen_ctl(ForcerContext* ctx, u32 seed, u16 x, u16 y) { if(!ctx) return 0x0; u8 masked_x = x & 3, masked_y = y & 3; u32 mask = ((u32)(y & 0xFFFC) << 16u) | (x & 0xFFFC); rng_reinitialize(ctx, seed ^ mask); u8 b1 = rng_next(ctx) & 7; static const u8 dd0c_lookup[128] = { 0x05,0x0B,0x06,0x00,0x0E,0x05,0x0B,0x03,0x09,0x0E,0x00,0x00,0x00,0x0C,0x00,0x00,0x00,0x0C,0x05,0x06,0x06,0x0D,0x0A,0x09,0x09,0x0B,0x06,0x00,0x00,0x05,0x0A,0x00,0x05,0x0B,0x06,0x00,0x0B,0x06,0x09,0x07,0x00,0x09,0x07,0x0A,0x00,0x05,0x0A,0x00,0x00,0x09,0x06,0x00,0x03,0x06,0x0C,0x05,0x00,0x09,0x0F,0x0A,0x00,0x05,0x0A,0x00,0x05,0x0A,0x00,0x00,0x0F,0x06,0x05,0x03,0x09,0x0F,0x0A,0x00,0x00,0x0C,0x00,0x00,0x05,0x0B,0x03,0x06,0x0E,0x00,0x00,0x0D,0x09,0x06,0x00,0x0C,0x00,0x0D,0x03,0x0A,0x00,0x09,0x06,0x00,0x06,0x05,0x0A,0x05,0x09,0x0F,0x06,0x0C,0x00,0x0C,0x09,0x0A,0x00,0x0C,0x00,0x00,0x03,0x0F,0x07,0x03,0x00,0x0D,0x0A,0x00,0x00,0x0C,0x00,0x00 }; auto offset = 4 * masked_y + masked_x; auto table_offset = offset + (b1 << 4u); u8 lookup_value = dd0c_lookup[table_offset]; u8 b2 = rng_next(ctx) & 0x30; b2 |= lookup_value; auto result = (((x&0xFF) | (y&0xFF)) & 0xFC) | ((x>>8u) | (y>>8u)); if(result != 0) return b2; else return b2 & 0x0F; } __device__ void map_decompress(ForcerContext* ctx, u8 new_tile) { if(!ctx) return; // Copy tiles to not trample over things while modifying memcpy(&ctx->decompress_buffer[0], &ctx->work_buffer[8], 0x30); for(unsigned i = 8; i < 8*7; ++i) { if(ctx->decompress_buffer[i - 8] != new_tile) continue; if(((i&7) == 0) || ((i&7) == 7)) continue; u8 v = rng_next(ctx); if(v & 1) { ctx->work_buffer[i-1] = new_tile; } if(v & 2) { ctx->work_buffer[i+1] = new_tile; } if(v & 4) { ctx->work_buffer[i-8] = new_tile; } if(v & 8) { ctx->work_buffer[i+8] = new_tile; } } } __device__ void map_place_tile_prob(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold) { if(!ctx) return; for(unsigned i = 0; i < 8*8; ++i) { if(ctx->work_buffer[i] != old) continue; u8 v = rng_next(ctx); if(v >= threshold) continue; ctx->work_buffer[i] = new_tile; } } // Same as above, but skips lines 0 and 7 and tiles 0,7 on each line // presumably to avoid softlocking __device__ void map_place_tile_prob_safe(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold) { if(!ctx) return; for(unsigned i = 8; i < 
8*7; ++i) { if((i&7) == 0 || (i&7) == 7) continue; if(ctx->work_buffer[i] != old) continue; u8 v = rng_next(ctx); if(v >= threshold) continue; ctx->work_buffer[i] = new_tile; } } __device__ void map_place_tile_at(ForcerContext* ctx, u8 tile, u8 x, u8 y) { x = x & 0x0f; y = y & 0x0f; ctx->work_buffer[y * 8 + x] = tile; } __device__ void map_place_line_impl(ForcerContext* ctx, u8 tile, u8 pos1, u8 pos2) { if(!ctx) return; u8 x1 = (pos1 >> 4u) & 0xf, x2 = (pos2 >> 4u) & 0xf, y1 = pos1 & 0x0f, y2 = pos2 & 0x0f; u8 ystep = (y1 < y2) ? 0x01 : 0xFF, xstep = (x1 < x2) ? 0x01 : 0xFF; while((x1 != x2) || (y1 != y2)) { map_place_tile_at(ctx, tile, x1, y1); if(x1 != x2) x1 += xstep; map_place_tile_at(ctx, tile, x1, y1); if(y1 != y2) y1 += ystep; map_place_tile_at(ctx, tile, x1, y1); } } __device__ void map_place_line(ForcerContext* ctx, u8 tile, u8 start, u8 end) { if(!ctx) return; u8 x = rng_next(ctx) & 7; while(x == 0 || x == 7) x = rng_next(ctx) & 7; u8 y = rng_next(ctx) & 7; while(y == 0 || y == 7) y = rng_next(ctx) & 7; u8 point_pos = (x << 4u) | y; map_place_line_impl(ctx, tile, start, point_pos); map_place_line_impl(ctx, tile, point_pos, end); } __device__ void maybe_map_gen(ForcerContext* ctx, u8 old, u8 new_tile, u8 threshold, u8 hfindpathflags, u8 hmultiplier, u8 hfindpathxprogress, u8 hmultiplybuffer) { if(!ctx) return; // b - old tile // c - new tile // d - threshold // ???? - findpathflags // ???? - multiplier // ???? - findpathxprogress // ???? - multiplybuffer // tile - hmutatewx unsigned i = 8, c = 0x30; while(c > 0) { if((i&7) == 0 || (i&7) == 7) { i++; c--; continue; } if(ctx->work_buffer[i] != old) { i++; c--; continue; } u8 v = rng_next(ctx); if(v < threshold) { i++; c--; continue; } i -= 0x8; if(hfindpathflags != 0) { if(ctx->work_buffer[i] != hfindpathflags) { i += 0x9; c--; continue; } } i += 0x10; if(hmultiplier != 0) { if(ctx->work_buffer[i] != hmultiplier) { i -= 0x7; c--; continue; } } i -= 0x09; if(hfindpathxprogress != 0) { if(ctx->work_buffer[i] != hfindpathxprogress) { i += 2; c--; continue; } } i += 2; if(hmultiplybuffer != 0) { if(ctx->work_buffer[i] != hmultiplybuffer) { c--; continue; } } ctx->work_buffer[i-1] = new_tile; c--; } } __device__ void gen_map_data(ForcerContext* ctx, u32 seed, u16 x, u16 y) { if(!ctx) return; //fmt::print("Generating map[{},{}] with seed={:08x}\n", x, y, seed); // Generate control byte auto ctl = gen_ctl(ctx, seed, x,y); // Fill work buffer with 0F memset(&ctx->work_buffer[0], 0x0F, 8 * 8); u8 var1 = 0; if(ctl & 1) var1 = 0x74; if(ctl & 2) var1 = 0x04; if(ctl & 4) var1 = 0x47; if(ctl & 8) var1 = 0x40; // Reinitialize RNG u32 seed_mask = ((u32)x << 16u) | y; rng_reinitialize(ctx, seed ^ seed_mask); // World gen? if(ctl & 1) map_place_line(ctx, 0xA, var1, 0x74); if(ctl & 2) map_place_line(ctx, 0xA, var1, 0x04); if(ctl & 4) map_place_line(ctx, 0xA, var1, 0x47); if(ctl & 8) map_place_line(ctx, 0xA, var1, 0x40); // "Decompression"? 
map_decompress(ctx, 0x0A); // Place exits if(ctl & 8) { map_place_tile_at(ctx, 0xa, 0x3, 0x0); map_place_tile_at(ctx, 0xa, 0x4, 0x0); } if(ctl & 4) { map_place_tile_at(ctx, 0xa, 0x3, 0x7); map_place_tile_at(ctx, 0xa, 0x4, 0x7); } if(ctl & 2) { map_place_tile_at(ctx, 0xa, 0x0, 0x3); map_place_tile_at(ctx, 0xa, 0x0, 0x4); } if(ctl & 1) { map_place_tile_at(ctx, 0xa, 0x7, 0x3); map_place_tile_at(ctx, 0xa, 0x7, 0x4); } // Biome specific generation auto biome_ctl = (ctl >> 4u) & 0x3; switch(biome_ctl) { case 0: { map_place_tile_prob(ctx, 0xa, 0xb, 0x30); map_decompress(ctx, 0x0B); maybe_map_gen(ctx, 0x0f, 0x6c, 0x20, 0x0f, 0x0a, 0x0, 0x0); maybe_map_gen(ctx, 0x0f, 0x6f, 0x20, 0x0a, 0x0f, 0x0, 0x0); maybe_map_gen(ctx, 0x0f, 0x6e, 0x20, 0x0, 0x0, 0x0a, 0x0f); maybe_map_gen(ctx, 0x0f, 0x6d, 0x20, 0x0, 0x0, 0x0f, 0x0a); map_place_tile_prob(ctx, 0xa, 0x74, 0x30); map_place_tile_prob(ctx, 0xa, 0x7a, 0x30); map_place_tile_prob_safe(ctx, 0x6c, 0x33, 0x40); map_place_tile_prob_safe(ctx, 0x6d, 0x32, 0x40); map_place_tile_prob_safe(ctx, 0x6e, 0x60, 0x40); map_place_tile_prob_safe(ctx, 0x6f, 0x34, 0x40); break; } case 1: { map_place_tile_prob(ctx, 0xa, 0x7b, 0x40); map_place_tile_prob(ctx, 0xa, 0x7a, 0x30); map_place_tile_prob(ctx, 0xa, 0xb, 0xd0); map_place_tile_prob_safe(ctx, 0x0a, 0x08, 0x20); break; } case 2: { // Screw this assert(false); break; } case 3: { // Screw this assert(false); break; } default: break; } } __device__ void generate_visible_map(ForcerContext* ctx, u32 seed) { if(!ctx) return; auto get_base = [](u8 x, u8 y) { if(y == 0) { return 0x0 + x*8; } else if(y == 1) { return 0xC0 + x*8; } else { return 0x180 + x*8; } }; for(unsigned x = 0; x < 3; ++x) { for(unsigned y = 0; y < 3; ++y) { gen_map_data(ctx, seed, x, y+1); auto base = get_base(x,y); // Copy lines such that the lines in nearby chunks are sequential in memory for(unsigned i = 0; i < 8; ++i) memcpy(&ctx->preloaded_map[base + i * 0x18], &ctx->work_buffer[i * 8], 8); } } } __device__ bool search_sequence(ForcerContext* ctx, unsigned windowx, unsigned windowy) { if(!ctx) return false; if(windowx >= 24 || windowx + 5 >= 24 || windowy >= 24 || windowy + 5 >= 24) return false; const u8 sequence[25] = { 0x0b,0x0b,0x0b,0x74,0x0a, 0x0f,0x0b,0x0f,0x0a,0x0a, 0x0f,0x0f,0x0a,0x0a,0x0b, 0x0f,0x0b,0x0a,0x0a,0x0a, 0x0b,0x0b,0x0a,0x0a,0x74 }; unsigned c = 0; for(unsigned y = windowy; y < windowy + 5; ++y) { for(unsigned x = windowx; x < windowx + 5; ++x) { auto addr = y * 24 + x; if(ctx->preloaded_map[addr] != sequence[c]) return false; c++; if(c == 25) return true; } } return false; } __global__ void forcer_entrypoint(void* context_pool_base, u32 cycle_base) { if(!context_pool_base) return; auto threadNumInBlock = threadIdx.x + blockDim.x * threadIdx.y; auto blockNumInGrid = blockIdx.x + gridDim.x * blockIdx.y; auto threadsPerBlock = blockDim.x * blockDim.y; auto thread_number = blockNumInGrid * threadsPerBlock + threadNumInBlock; auto* ctx = (ForcerContext*)((u8*)context_pool_base + thread_number * sizeof(ForcerContext)); u32 input_seed = cycle_base + thread_number; auto seed = (input_seed << 4u) | 0x01u; generate_visible_map(ctx, seed); // Search the possible window for the sequence for(unsigned x = 6; x <= 13; ++x) { for(unsigned y = 6; y <= 13; ++y) { bool res = search_sequence(ctx, x,y); if(res) { printf("[Thread %d] SEED=%08x Found pattern occurence!\n", thread_number, seed); return; } } } } #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) 
{ if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int main() { fmt::print("Bruteforce using {} blocks, {} threads per block, total {} CUDA threads\n", block_count, threads_per_block, core_count); fmt::print("Seeds per CUDA thread: {}\n", seeds_per_thread); fmt::print("Forcer context size: {} bytes\n", context_size); void* alloc_base; gpuErrchk(cudaMalloc((void**)&alloc_base, context_size)); unsigned rounds = seed_count / core_count; fmt::print("Rounds: {}\n", rounds); auto force_start = std::chrono::high_resolution_clock::now(); auto start = force_start; for(unsigned i = 0; i < rounds; i++) { u32 seed_base = i * core_count; forcer_entrypoint<<<block_count, threads_per_block>>>(alloc_base, seed_base); gpuErrchk(cudaPeekAtLastError()); gpuErrchk(cudaDeviceSynchronize()); auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count(); auto eta = ((rounds - i) * duration) / 1000; auto eta_mins = eta / 60; auto eta_secs = eta % 60; if((i % 8) == 0) { const auto since_start = std::chrono::duration_cast<std::chrono::seconds>(end - force_start).count(); const auto sps = (since_start == 0) ? 0 : ((i+1)*core_count) / since_start; fmt::print("Progress: round {}/{}, seeds: {:07x}x-{:07x}x [{}%], eta={}m:{}s, {} seeds/s\n", i, rounds, seed_base, (seed_base + core_count), 100.0 * seed_base / seed_count, eta_mins, eta_secs, sps); } start = end; } cudaFree(alloc_base); return 0; }
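// The forcer's 4-byte RNG (rng_reinitialize/rng_next above) is simple enough
// to cross-check on the host. A minimal CPU re-implementation of the same
// update rule, printing the first few outputs; the seed value below is an
// illustrative assumption, not one of the brute-forced seeds.
#include <cstdio>
#include <cstdint>

struct Rng { uint8_t s[4]; };

static uint8_t rng_next(Rng &r)
{
    r.s[0]++;
    r.s[1] = r.s[3] ^ r.s[0] ^ r.s[1];
    r.s[2] = r.s[1] + r.s[2];
    r.s[3] = r.s[3] + ((r.s[2] >> 1) ^ r.s[1]);
    return r.s[3];
}

static void rng_reinitialize(Rng &r, uint32_t seed)
{
    r.s[3] = seed & 0xffu;
    r.s[2] = (seed >> 8u) & 0xffu;
    r.s[1] = (seed >> 16u) & 0xffu;
    r.s[0] = (seed >> 24u) & 0xffu;
    for (unsigned i = 0; i < 0x10; ++i)   // same warm-up as the device code
        rng_next(r);
}

int main()
{
    Rng r;
    rng_reinitialize(r, 0x00000001u);     // arbitrary seed for illustration
    for (int i = 0; i < 8; ++i)
        printf("%02x ", rng_next(r));
    printf("\n");
    return 0;
}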
7cda6ce230dab4cefd606b8c358fcbbe1d2bafcc.hip
// !!! This is a file automatically generated by hipify!!! /* This version assigns one thread per 16 bytes of text.(one text block) Stores the plaintext/ciphertext in registers. Stores the encryption keys in shared memory. Stores the S-boxes in shared memory. The blocksize is 512. */ #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> typedef unsigned char uint8; //Key generation constants uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 }; uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 }; uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e }; //Encryption round keys uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //Decyription round keys uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //S-boxes static const uint8 SB1[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; static const uint8 SB2[256] = { 0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1, 0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1, 0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB, 0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB, 0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD, 0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53, 0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1, 0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40, 0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC, 0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5, 0x15, 0xDD, 0xFD, 0x17, 
0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 0x9D, 0x43, 0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8, 0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA, 0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C, 0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D, 0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81 }; static const uint8 SB3[256] = { 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D }; static const uint8 SB4[256] = { 0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C, 0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D, 0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D, 0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED, 0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B, 0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE, 0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9, 0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41, 0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A, 0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7, 0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC, 0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5, 0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45, 0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D, 0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 
0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3, 0x25, 0x8A, 0xB5, 0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60 }; uint8 hex2dec(char ch) { if (ch >= '0' && ch <= '9') return ch - '0'; else return ch - 'a' + 10; } uint8 leftRotate(uint8 n, uint8 d) { return (n << d) | (n >> (8 - d)); } uint8 rightRotate(uint8 n, uint8 d) { return (n >> d) | (n << (8 - d)); } uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*) malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[arrSize - amount + i]; } for (int i = arrSize - 1; i >= amount; i--) { newArr[i] = arr[i - amount]; } for (int i = 0; i < amount; i++) { newArr[i] = tmp[i]; } free(tmp); return newArr; } uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*)malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[i]; } for (int i = 0; i < arrSize - amount; i++) { newArr[i] = arr[i + amount]; } for (int i = 0; i < amount; i++) { newArr[arrSize - amount + i] = tmp[i]; } free(tmp); return newArr; } uint8* ShiftArrR(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right for (int i = 0; i < arrSize; i++) { carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right arr[i] >>= amount;//right shift the current byte. arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left carry = carryTmp; } return arr; } uint8* ShiftArrL(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left for (int i = arrSize - 1; i >= 0; i--) { carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left arr[i] <<= amount;//left shift the current byte. 
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right carry = carryTmp; } return arr; } void XOR_16(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } } void XOR_16wFree(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } free(y); } //Substition Layer 1 void SL1(uint8* in, uint8* out) { out[0] = SB1[in[0]]; out[1] = SB2[in[1]]; out[2] = SB3[in[2]]; out[3] = SB4[in[3]]; out[4] = SB1[in[4]]; out[5] = SB2[in[5]]; out[6] = SB3[in[6]]; out[7] = SB4[in[7]]; out[8] = SB1[in[8]]; out[9] = SB2[in[9]]; out[10] = SB3[in[10]]; out[11] = SB4[in[11]]; out[12] = SB1[in[12]]; out[13] = SB2[in[13]]; out[14] = SB3[in[14]]; out[15] = SB4[in[15]]; } //Substition Layer 2(Inverse of SL1) void SL2(uint8* in, uint8* out) { out[0] = SB3[in[0]]; out[1] = SB4[in[1]]; out[2] = SB1[in[2]]; out[3] = SB2[in[3]]; out[4] = SB3[in[4]]; out[5] = SB4[in[5]]; out[6] = SB1[in[6]]; out[7] = SB2[in[7]]; out[8] = SB3[in[8]]; out[9] = SB4[in[9]]; out[10] = SB1[in[10]]; out[11] = SB2[in[11]]; out[12] = SB3[in[12]]; out[13] = SB4[in[13]]; out[14] = SB1[in[14]]; out[15] = SB2[in[15]]; } //Diffusion layer void A(uint8* in, uint8* out) { out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14]; out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15]; out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15]; out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14]; out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15]; out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15]; out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13]; out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13]; out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15]; out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14]; out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15]; out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14]; out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12]; out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13]; out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14]; out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15]; } /*Round Functions(F0,FE) takes 16 bytes of plaintext and generates an intermediate val of 16bytes */ //Odd Round Function void F0(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL1(res1, res2); A(res2, out); } //Even Round Function void FE(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL2(res1, res2); A(res2, out); } void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3) { //Producing encryption round keys //Producing encryption round keys can be parallelized. //However since we do this once for all blocks, it is faster to compute in CPU. //ShiftArr functions return array from heap, must free. 
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]); XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]); XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]); XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]); XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]); XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]); XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]); XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]); XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]); XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]); XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]); XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]); XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]); XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]); XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]); XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]); XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]); } void GenerateDecRoundKeys(uint8 numOfRounds) { int N = numOfRounds - 1; int k = 1; for (int i = 0; i < 16; i++) { dk[i] = ek[16 * N + i]; } for (int i = N - 1; i >= 1; i--) { A(&ek[i * 16], &dk[k * 16]); k++; } for (int i = 0; i < 16; i++) { dk[k * 16 + i] = ek[i]; } } //Odd Round Function __device__ void F0_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL1) D[0] = SB1[D[0]]; D[1] = SB2[D[1]]; D[2] = SB3[D[2]]; D[3] = SB4[D[3]]; D[4] = SB1[D[4]]; D[5] = SB2[D[5]]; D[6] = SB3[D[6]]; D[7] = SB4[D[7]]; D[8] = SB1[D[8]]; D[9] = SB2[D[9]]; D[10] = SB3[D[10]]; D[11] = SB4[D[11]]; D[12] = SB1[D[12]]; D[13] = SB2[D[13]]; D[14] = SB3[D[14]]; D[15] = SB4[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } //Even Round Function __device__ void FE_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL2) D[0] = SB3[D[0]]; D[1] = SB4[D[1]]; D[2] = SB1[D[2]]; D[3] = SB2[D[3]]; D[4] = SB3[D[4]]; D[5] = SB4[D[5]]; D[6] = SB1[D[6]]; D[7] = SB2[D[7]]; D[8] = SB3[D[8]]; D[9] = SB4[D[9]]; D[10] = SB1[D[10]]; D[11] = SB2[D[11]]; D[12] = SB3[D[12]]; D[13] = SB4[D[13]]; D[14] = SB1[D[14]]; D[15] = SB2[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; 
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } template <unsigned int keySize> __global__ void Decrypt(uint8* cipherText, unsigned long textSize, uint8* dk, uint8* SB_gmem, uint8* IV) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; uint8 cipherTextR_1[16];//registers keeping the cipherText. uint8 cipherTextR_2[16];//registers keeping the cipherText. __shared__ uint8 keySmem[272];//each round key is 16 bytes, there are 17 round keys 272 bytes __shared__ uint8 SB1[256]; __shared__ uint8 SB2[256]; __shared__ uint8 SB3[256]; __shared__ uint8 SB4[256]; //Load decryption round keys to shared memory. keySmem[tid] = dk[tid]; //rest of the bytes(272 bytes) are loaded by first 16 threads. if (tid < 16) { keySmem[256 + tid] = dk[256 + tid]; } //Load SB tables to shared memory.(1024 bytes loaded by 256 threads) SB1[tid] = SB_gmem[tid]; SB2[tid] = SB_gmem[tid + 256]; SB3[tid] = SB_gmem[tid + 512]; SB4[tid] = SB_gmem[tid + 768]; //Load the plaintext to registers //Each thread is responsible for 16 bytes. if(idx == 0)//first thread is different than other threads.Uses IV. { for (int i = 0; i < 16; i++) { cipherTextR_1[i] = IV[i]; } } else { for (int i = 0; i < 16; i++) { cipherTextR_1[i] = cipherText[16 * idx -16 + i];//non-coalasced access, slow } } for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherText[16 * idx + i];//non-coalasced access, slow } __syncthreads(); if (keySize == 16)//128-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... 
FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4);//...ek11 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[176 + i];//ek12 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[192 + i];//ek13 } //XOR with the previous block. #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } else if (keySize == 24)//192-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[192], SB1, SB2, SB3, SB4);//ek13 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[208 + i];//ek14 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[224 + i];//ek15 } //XOR with the previous block. 
#pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } else//256-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[192], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[208], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[224], SB1, SB2, SB3, SB4);//ek15 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[240 + i];//ek16 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[256 + i];//ek17 } //XOR with the previous block. #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } } int main(void) { /////////INPUT PART BEGIN////////////////////// //Device pointers: uint8* deviceArr, *dk_d, *SB_dev, *IV_d; FILE *file; uint8* inputText;//ciphertext input unsigned long int fileLen, textSize; uint8 numOfRounds; const uint8 keySize = 32; uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; uint8 IV[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; file = fopen("../input.txt", "r"); if (file) { char buf[2]; fseek(file, 0, SEEK_END); fileLen = ftell(file); fseek(file, 0, SEEK_SET); textSize = fileLen / 2; inputText = (uint8*)malloc(textSize); for (int i = 0; i < textSize; i++) { buf[0] = fgetc(file); buf[1] = fgetc(file); uint8 hexVal = (uint8)strtol(buf, NULL, 16); inputText[i] = hexVal; } } else { printf("File not found.\n"); return -1; } /////////INPUT PART END////////////////////// if (keySize == 16) numOfRounds = 13; else if (keySize == 24) numOfRounds = 15; else numOfRounds = 17; uint8 KL[16];//KL = leftmost 16 bytes of key uint8 KR[16];//KR = rightmost 16 bytes of key /* Most significant byte is stored in 0th index. 
KL = leftmost 16 bytes of key KR = rightmost 16 bytes of key */ for (int i = 0; i < 16; i++) { KL[i] = key[i]; } for (int i = 0; i < 16; i++) { KR[i] = key[i + 16]; } uint8* CK1, *CK2, *CK3; if (keySize == 16) { CK1 = C1; CK2 = C2; CK3 = C3; } else if (keySize == 24) { CK1 = C2; CK2 = C3; CK3 = C1; } else { CK1 = C3; CK2 = C1; CK3 = C2; } //Calculate round key generators W0,W1,W2,W3 uint8* W0 = KL; uint8 W1[16]; uint8 W2[16]; uint8 W3[16]; uint8 Fres[16];//auxilary array /* W0, W1, W2, W3 are calculated only once and used for all blocks. Since the key data W0 and CK1 are small enough this key generators are calculated in CPU. W1 needed for calc of W2, W2 needed for calc of W3. F0 and FE are also used in the encryption process. */ F0(W0, CK1, Fres); XOR_16(Fres, KR, W1); FE(W1, CK2, Fres); XOR_16(Fres, W0, W2); F0(W2, CK3, Fres); XOR_16(Fres, W1, W3); GenerateRoundKeys(W0, W1, W2, W3); /* Because each thread will process 16 bytes we need textSize/16 threads in total. Then thread number per block is: ceil(textSize/(16*blockSize)) bytes. To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM. Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K. In this code 16 registers used for plaintext, 16 registers auxilary, +1 by itself, each thread uses 33 registers. Then blocksize must be smaller than 64k/33. And larger than 272 since first 272 threads loads the shared memory. 512, 1024 are available blockSizes. 256 can also be tried but number of threads loading the shared memory must be decreased. Keeping the round keys in registers results in low number of warps per SM therefore poor performance. */ int blockSize = 256; int numOfBlocks = ceil((float)(textSize) / (16 * blockSize)); //Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys. GenerateDecRoundKeys(numOfRounds); uint8* resPlainText = (uint8*)malloc(textSize); hipMalloc((void**)& deviceArr, textSize); hipMalloc((void**)& dk_d, 272); hipMalloc((void**)& IV_d, 16); hipMalloc((void**)& SB_dev, 1024); //START TIMER. using namespace std::chrono; high_resolution_clock::time_point start = high_resolution_clock::now(); hipMemcpy(deviceArr, inputText, textSize, hipMemcpyHostToDevice); hipMemcpy(dk_d, dk, 272, hipMemcpyHostToDevice); hipMemcpy(IV_d, IV, 16, hipMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) hipMemcpy(SB_dev, SB1, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 256, SB2, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 512, SB3, 256, hipMemcpyHostToDevice); hipMemcpy(SB_dev + 768, SB4, 256, hipMemcpyHostToDevice); Decrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d, SB_dev ,IV_d); hipMemcpy(resPlainText, deviceArr, textSize, hipMemcpyDeviceToHost); //END TIMER; PRINT ELAPSED TIME. high_resolution_clock::time_point end = high_resolution_clock::now(); duration<double> timeElapsed = duration_cast<duration<double>>(end - start); std::cout << "Time elapsed: " << timeElapsed.count() << std::endl; //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resPlainText[i]); } fclose(f); //free hipFree(deviceArr); hipFree(dk_d); free(resPlainText); return 0; }
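Editorial note: the main() above launches Decrypt<keySize> and copies the result back without ever querying the HIP runtime for errors, so a failed launch would silently yield stale output. A minimal, hedged sketch of the usual check (assumes only the HIP runtime API plus <cstdio>/<cstdlib>; HIP_CHECK is a name introduced here for illustration, not part of the original file):

// Wrap runtime calls and the launch point with basic error reporting.
#define HIP_CHECK(expr)                                                     \
    do {                                                                    \
        hipError_t err_ = (expr);                                           \
        if (err_ != hipSuccess) {                                           \
            fprintf(stderr, "HIP error %s at %s:%d\n",                      \
                    hipGetErrorString(err_), __FILE__, __LINE__);           \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Usage around the launch in main():
//   Decrypt<keySize> <<<numOfBlocks, blockSize>>> (deviceArr, textSize, dk_d, SB_dev, IV_d);
//   HIP_CHECK(hipGetLastError());       // catches an invalid launch configuration
//   HIP_CHECK(hipDeviceSynchronize());  // surfaces errors raised while the kernel ran
//   HIP_CHECK(hipMemcpy(resPlainText, deviceArr, textSize, hipMemcpyDeviceToHost));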
7cda6ce230dab4cefd606b8c358fcbbe1d2bafcc.cu
/* This version assigns one thread per 16 bytes of text.(one text block) Stores the plaintext/ciphertext in registers. Stores the encryption keys in shared memory. Stores the S-boxes in shared memory. The blocksize is 512. */ #include <iostream> #include <fstream> #include <sstream> #include <chrono> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> typedef unsigned char uint8; //Key generation constants uint8 C1[] = { 0x51,0x7c,0xc1,0xb7,0x27,0x22,0x0a,0x94,0xfe,0x13,0xab,0xe8,0xfa,0x9a,0x6e,0xe0 }; uint8 C2[] = { 0x6d,0xb1,0x4a,0xcc,0x9e,0x21,0xc8,0x20,0xff,0x28,0xb1,0xd5,0xef,0x5d,0xe2,0xb0 }; uint8 C3[] = { 0xdb,0x92,0x37,0x1d,0x21,0x26,0xe9,0x70,0x03,0x24,0x97,0x75,0x04,0xe8,0xc9,0x0e }; //Encryption round keys uint8 ek[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //Decyription round keys uint8 dk[272] = { 0 }; //272 bytes(17 round keys each 16 bytes) //S-boxes static const uint8 SB1[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; static const uint8 SB2[256] = { 0xE2, 0x4E, 0x54, 0xFC, 0x94, 0xC2, 0x4A, 0xCC, 0x62, 0x0D, 0x6A, 0x46, 0x3C, 0x4D, 0x8B, 0xD1, 0x5E, 0xFA, 0x64, 0xCB, 0xB4, 0x97, 0xBE, 0x2B, 0xBC, 0x77, 0x2E, 0x03, 0xD3, 0x19, 0x59, 0xC1, 0x1D, 0x06, 0x41, 0x6B, 0x55, 0xF0, 0x99, 0x69, 0xEA, 0x9C, 0x18, 0xAE, 0x63, 0xDF, 0xE7, 0xBB, 0x00, 0x73, 0x66, 0xFB, 0x96, 0x4C, 0x85, 0xE4, 0x3A, 0x09, 0x45, 0xAA, 0x0F, 0xEE, 0x10, 0xEB, 0x2D, 0x7F, 0xF4, 0x29, 0xAC, 0xCF, 0xAD, 0x91, 0x8D, 0x78, 0xC8, 0x95, 0xF9, 0x2F, 0xCE, 0xCD, 0x08, 0x7A, 0x88, 0x38, 0x5C, 0x83, 0x2A, 0x28, 0x47, 0xDB, 0xB8, 0xC7, 0x93, 0xA4, 0x12, 0x53, 0xFF, 0x87, 0x0E, 0x31, 0x36, 0x21, 0x58, 0x48, 0x01, 0x8E, 0x37, 0x74, 0x32, 0xCA, 0xE9, 0xB1, 0xB7, 0xAB, 0x0C, 0xD7, 0xC4, 0x56, 0x42, 0x26, 0x07, 0x98, 0x60, 0xD9, 0xB6, 0xB9, 0x11, 0x40, 0xEC, 0x20, 0x8C, 0xBD, 0xA0, 0xC9, 0x84, 0x04, 0x49, 0x23, 0xF1, 0x4F, 0x50, 0x1F, 0x13, 0xDC, 0xD8, 0xC0, 0x9E, 0x57, 0xE3, 0xC3, 0x7B, 0x65, 0x3B, 0x02, 0x8F, 0x3E, 0xE8, 0x25, 0x92, 0xE5, 0x15, 0xDD, 0xFD, 0x17, 0xA9, 0xBF, 0xD4, 0x9A, 0x7E, 0xC5, 0x39, 0x67, 0xFE, 0x76, 
0x9D, 0x43, 0xA7, 0xE1, 0xD0, 0xF5, 0x68, 0xF2, 0x1B, 0x34, 0x70, 0x05, 0xA3, 0x8A, 0xD5, 0x79, 0x86, 0xA8, 0x30, 0xC6, 0x51, 0x4B, 0x1E, 0xA6, 0x27, 0xF6, 0x35, 0xD2, 0x6E, 0x24, 0x16, 0x82, 0x5F, 0xDA, 0xE6, 0x75, 0xA2, 0xEF, 0x2C, 0xB2, 0x1C, 0x9F, 0x5D, 0x6F, 0x80, 0x0A, 0x72, 0x44, 0x9B, 0x6C, 0x90, 0x0B, 0x5B, 0x33, 0x7D, 0x5A, 0x52, 0xF3, 0x61, 0xA1, 0xF7, 0xB0, 0xD6, 0x3F, 0x7C, 0x6D, 0xED, 0x14, 0xE0, 0xA5, 0x3D, 0x22, 0xB3, 0xF8, 0x89, 0xDE, 0x71, 0x1A, 0xAF, 0xBA, 0xB5, 0x81 }; static const uint8 SB3[256] = { 0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB, 0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB, 0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E, 0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25, 0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92, 0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84, 0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06, 0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B, 0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73, 0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E, 0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B, 0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4, 0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F, 0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF, 0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D }; static const uint8 SB4[256] = { 0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21, 0x78, 0x50, 0x39, 0xDB, 0xE1, 0x72, 0x09, 0x62, 0x3C, 0x3E, 0x7E, 0x5E, 0x8E, 0xF1, 0xA0, 0xCC, 0xA3, 0x2A, 0x1D, 0xFB, 0xB6, 0xD6, 0x20, 0xC4, 0x8D, 0x81, 0x65, 0xF5, 0x89, 0xCB, 0x9D, 0x77, 0xC6, 0x57, 0x43, 0x56, 0x17, 0xD4, 0x40, 0x1A, 0x4D, 0xC0, 0x63, 0x6C, 0xE3, 0xB7, 0xC8, 0x64, 0x6A, 0x53, 0xAA, 0x38, 0x98, 0x0C, 0xF4, 0x9B, 0xED, 0x7F, 0x22, 0x76, 0xAF, 0xDD, 0x3A, 0x0B, 0x58, 0x67, 0x88, 0x06, 0xC3, 0x35, 0x0D, 0x01, 0x8B, 0x8C, 0xC2, 0xE6, 0x5F, 0x02, 0x24, 0x75, 0x93, 0x66, 0x1E, 0xE5, 0xE2, 0x54, 0xD8, 0x10, 0xCE, 0x7A, 0xE8, 0x08, 0x2C, 0x12, 0x97, 0x32, 0xAB, 0xB4, 0x27, 0x0A, 0x23, 0xDF, 0xEF, 0xCA, 0xD9, 0xB8, 0xFA, 0xDC, 0x31, 0x6B, 0xD1, 0xAD, 0x19, 0x49, 0xBD, 0x51, 0x96, 0xEE, 0xE4, 0xA8, 0x41, 0xDA, 0xFF, 0xCD, 0x55, 0x86, 0x36, 0xBE, 0x61, 0x52, 0xF8, 0xBB, 0x0E, 0x82, 0x48, 0x69, 0x9A, 0xE0, 0x47, 0x9E, 0x5C, 0x04, 0x4B, 0x34, 0x15, 0x79, 0x26, 0xA7, 0xDE, 0x29, 0xAE, 0x92, 0xD7, 0x84, 0xE9, 0xD2, 0xBA, 0x5D, 0xF3, 0xC5, 0xB0, 0xBF, 0xA4, 0x3B, 0x71, 0x44, 0x46, 0x2B, 0xFC, 0xEB, 0x6F, 0xD5, 0xF6, 0x14, 0xFE, 0x7C, 0x70, 0x5A, 0x7D, 0xFD, 0x2F, 0x18, 0x83, 0x16, 0xA5, 0x91, 0x1F, 0x05, 0x95, 0x74, 0xA9, 0xC1, 0x5B, 0x4A, 0x85, 0x6D, 0x13, 0x07, 0x4F, 0x4E, 0x45, 0xB2, 0x0F, 0xC9, 0x1C, 0xA6, 0xBC, 0xEC, 0x73, 0x90, 0x7B, 0xCF, 0x59, 0x8F, 0xA1, 0xF9, 0x2D, 0xF2, 0xB1, 0x00, 0x94, 0x37, 0x9F, 0xD0, 0x2E, 0x9C, 0x6E, 0x28, 0x3F, 0x80, 0xF0, 0x3D, 0xD3, 0x25, 0x8A, 0xB5, 
0xE7, 0x42, 0xB3, 0xC7, 0xEA, 0xF7, 0x4C, 0x11, 0x33, 0x03, 0xA2, 0xAC, 0x60 }; uint8 hex2dec(char ch) { if (ch >= '0' && ch <= '9') return ch - '0'; else return ch - 'a' + 10; } uint8 leftRotate(uint8 n, uint8 d) { return (n << d) | (n >> (8 - d)); } uint8 rightRotate(uint8 n, uint8 d) { return (n >> d) | (n << (8 - d)); } uint8* RightShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*) malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[arrSize - amount + i]; } for (int i = arrSize - 1; i >= amount; i--) { newArr[i] = arr[i - amount]; } for (int i = 0; i < amount; i++) { newArr[i] = tmp[i]; } free(tmp); return newArr; } uint8* LeftShiftBytes(uint8* arr, int arrSize, int amount)//shift the bytes, place them in a new array { uint8* tmp = (uint8*)malloc(amount); uint8* newArr = (uint8*)malloc(16 * sizeof(uint8)); for (int i = 0; i < amount; i++) { tmp[i] = arr[i]; } for (int i = 0; i < arrSize - amount; i++) { newArr[i] = arr[i + amount]; } for (int i = 0; i < amount; i++) { newArr[arrSize - amount + i] = tmp[i]; } free(tmp); return newArr; } uint8* ShiftArrR(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = RightShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[arrSize - 1] & (0xff >> (8 - amount));//bits that are shifted to byte on right for (int i = 0; i < arrSize; i++) { carryTmp = arr[i] & (0xff >> (8 - amount));//calculate carry for byte on right arr[i] >>= amount;//right shift the current byte. arr[i] |= rightRotate(carry, amount);//place the bits from coming from byte on left carry = carryTmp; } return arr; } uint8* ShiftArrL(uint8* originalArr, int amount) { int arrSize = 16; int byteShiftAmount = amount / 8; uint8* arr = LeftShiftBytes(originalArr, arrSize, byteShiftAmount); amount = amount - byteShiftAmount * 8; uint8 carryTmp, carry; carry = arr[0] & (0xff << (8 - amount));//bits that are shifted to byte on left for (int i = arrSize - 1; i >= 0; i--) { carryTmp = arr[i] & (0xff << (8 - amount));//calculate carry for byte on left arr[i] <<= amount;//left shift the current byte. 
arr[i] |= leftRotate(carry, amount);//place the bits from coming from byte on right carry = carryTmp; } return arr; } void XOR_16(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } } void XOR_16wFree(uint8* x, uint8* y, uint8* z) { for (int i = 0; i < 16; i++) { z[i] = x[i] ^ y[i]; } free(y); } //Substition Layer 1 void SL1(uint8* in, uint8* out) { out[0] = SB1[in[0]]; out[1] = SB2[in[1]]; out[2] = SB3[in[2]]; out[3] = SB4[in[3]]; out[4] = SB1[in[4]]; out[5] = SB2[in[5]]; out[6] = SB3[in[6]]; out[7] = SB4[in[7]]; out[8] = SB1[in[8]]; out[9] = SB2[in[9]]; out[10] = SB3[in[10]]; out[11] = SB4[in[11]]; out[12] = SB1[in[12]]; out[13] = SB2[in[13]]; out[14] = SB3[in[14]]; out[15] = SB4[in[15]]; } //Substition Layer 2(Inverse of SL1) void SL2(uint8* in, uint8* out) { out[0] = SB3[in[0]]; out[1] = SB4[in[1]]; out[2] = SB1[in[2]]; out[3] = SB2[in[3]]; out[4] = SB3[in[4]]; out[5] = SB4[in[5]]; out[6] = SB1[in[6]]; out[7] = SB2[in[7]]; out[8] = SB3[in[8]]; out[9] = SB4[in[9]]; out[10] = SB1[in[10]]; out[11] = SB2[in[11]]; out[12] = SB3[in[12]]; out[13] = SB4[in[13]]; out[14] = SB1[in[14]]; out[15] = SB2[in[15]]; } //Diffusion layer void A(uint8* in, uint8* out) { out[0] = in[3] ^ in[4] ^ in[6] ^ in[8] ^ in[9] ^ in[13] ^ in[14]; out[1] = in[2] ^ in[5] ^ in[7] ^ in[8] ^ in[9] ^ in[12] ^ in[15]; out[2] = in[1] ^ in[4] ^ in[6] ^ in[10] ^ in[11] ^ in[12] ^ in[15]; out[3] = in[0] ^ in[5] ^ in[7] ^ in[10] ^ in[11] ^ in[13] ^ in[14]; out[4] = in[0] ^ in[2] ^ in[5] ^ in[8] ^ in[11] ^ in[14] ^ in[15]; out[5] = in[1] ^ in[3] ^ in[4] ^ in[9] ^ in[10] ^ in[14] ^ in[15]; out[6] = in[0] ^ in[2] ^ in[7] ^ in[9] ^ in[10] ^ in[12] ^ in[13]; out[7] = in[1] ^ in[3] ^ in[6] ^ in[8] ^ in[11] ^ in[12] ^ in[13]; out[8] = in[0] ^ in[1] ^ in[4] ^ in[7] ^ in[10] ^ in[13] ^ in[15]; out[9] = in[0] ^ in[1] ^ in[5] ^ in[6] ^ in[11] ^ in[12] ^ in[14]; out[10] = in[2] ^ in[3] ^ in[5] ^ in[6] ^ in[8] ^ in[13] ^ in[15]; out[11] = in[2] ^ in[3] ^ in[4] ^ in[7] ^ in[9] ^ in[12] ^ in[14]; out[12] = in[1] ^ in[2] ^ in[6] ^ in[7] ^ in[9] ^ in[11] ^ in[12]; out[13] = in[0] ^ in[3] ^ in[6] ^ in[7] ^ in[8] ^ in[10] ^ in[13]; out[14] = in[0] ^ in[3] ^ in[4] ^ in[5] ^ in[9] ^ in[11] ^ in[14]; out[15] = in[1] ^ in[2] ^ in[4] ^ in[5] ^ in[8] ^ in[10] ^ in[15]; } /*Round Functions(F0,FE) takes 16 bytes of plaintext and generates an intermediate val of 16bytes */ //Odd Round Function void F0(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL1(res1, res2); A(res2, out); } //Even Round Function void FE(uint8* D, uint8* RK, uint8* out) { //res1, res2 are auxillary arrays for storing the results of XOR_16 and SL1 uint8 res1[16]; uint8 res2[16]; XOR_16(D, RK, res1); SL2(res1, res2); A(res2, out); } void GenerateRoundKeys(uint8* W0, uint8* W1, uint8* W2, uint8* W3) { //Producing encryption round keys //Producing encryption round keys can be parallelized. //However since we do this once for all blocks, it is faster to compute in CPU. //ShiftArr functions return array from heap, must free. 
XOR_16wFree(W0, ShiftArrR(W1, 19), &ek[0]); XOR_16wFree(W1, ShiftArrR(W2, 19), &ek[16]); XOR_16wFree(W2, ShiftArrR(W3, 19), &ek[32]); XOR_16wFree(W3, ShiftArrR(W0, 19), &ek[48]); XOR_16wFree(W0, ShiftArrR(W1, 31), &ek[64]); XOR_16wFree(W1, ShiftArrR(W2, 31), &ek[80]); XOR_16wFree(W2, ShiftArrR(W3, 31), &ek[96]); XOR_16wFree(W3, ShiftArrR(W0, 31), &ek[112]); XOR_16wFree(W0, ShiftArrL(W1, 61), &ek[128]); XOR_16wFree(W1, ShiftArrL(W2, 61), &ek[144]); XOR_16wFree(W2, ShiftArrL(W3, 61), &ek[160]); XOR_16wFree(W3, ShiftArrL(W0, 61), &ek[176]); XOR_16wFree(W0, ShiftArrL(W1, 31), &ek[192]); XOR_16wFree(W1, ShiftArrL(W2, 31), &ek[208]); XOR_16wFree(W2, ShiftArrL(W3, 31), &ek[224]); XOR_16wFree(W3, ShiftArrL(W0, 31), &ek[240]); XOR_16wFree(W0, ShiftArrL(W1, 19), &ek[256]); } void GenerateDecRoundKeys(uint8 numOfRounds) { int N = numOfRounds - 1; int k = 1; for (int i = 0; i < 16; i++) { dk[i] = ek[16 * N + i]; } for (int i = N - 1; i >= 1; i--) { A(&ek[i * 16], &dk[k * 16]); k++; } for (int i = 0; i < 16; i++) { dk[k * 16 + i] = ek[i]; } } //Odd Round Function __device__ void F0_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL1) D[0] = SB1[D[0]]; D[1] = SB2[D[1]]; D[2] = SB3[D[2]]; D[3] = SB4[D[3]]; D[4] = SB1[D[4]]; D[5] = SB2[D[5]]; D[6] = SB3[D[6]]; D[7] = SB4[D[7]]; D[8] = SB1[D[8]]; D[9] = SB2[D[9]]; D[10] = SB3[D[10]]; D[11] = SB4[D[11]]; D[12] = SB1[D[12]]; D[13] = SB2[D[13]]; D[14] = SB3[D[14]]; D[15] = SB4[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } //Even Round Function __device__ void FE_d(uint8* D, const uint8* RK, uint8* SB1, uint8* SB2, uint8* SB3, uint8* SB4) { uint8 aux[16];//auxilary array for keeping the results of Diffusion layer //XOR with the round key #pragma unroll for (int i = 0; i < 16; i++) { D[i] = D[i] ^ RK[i]; } //Substition Layer(SL2) D[0] = SB3[D[0]]; D[1] = SB4[D[1]]; D[2] = SB1[D[2]]; D[3] = SB2[D[3]]; D[4] = SB3[D[4]]; D[5] = SB4[D[5]]; D[6] = SB1[D[6]]; D[7] = SB2[D[7]]; D[8] = SB3[D[8]]; D[9] = SB4[D[9]]; D[10] = SB1[D[10]]; D[11] = SB2[D[11]]; D[12] = SB3[D[12]]; D[13] = SB4[D[13]]; D[14] = SB1[D[14]]; D[15] = SB2[D[15]]; //Diffusion layer aux[0] = D[3] ^ D[4] ^ D[6] ^ D[8] ^ D[9] ^ D[13] ^ D[14]; aux[1] = D[2] ^ D[5] ^ D[7] ^ D[8] ^ D[9] ^ D[12] ^ D[15]; aux[2] = D[1] ^ D[4] ^ D[6] ^ D[10] ^ D[11] ^ D[12] ^ D[15]; 
aux[3] = D[0] ^ D[5] ^ D[7] ^ D[10] ^ D[11] ^ D[13] ^ D[14]; aux[4] = D[0] ^ D[2] ^ D[5] ^ D[8] ^ D[11] ^ D[14] ^ D[15]; aux[5] = D[1] ^ D[3] ^ D[4] ^ D[9] ^ D[10] ^ D[14] ^ D[15]; aux[6] = D[0] ^ D[2] ^ D[7] ^ D[9] ^ D[10] ^ D[12] ^ D[13]; aux[7] = D[1] ^ D[3] ^ D[6] ^ D[8] ^ D[11] ^ D[12] ^ D[13]; aux[8] = D[0] ^ D[1] ^ D[4] ^ D[7] ^ D[10] ^ D[13] ^ D[15]; aux[9] = D[0] ^ D[1] ^ D[5] ^ D[6] ^ D[11] ^ D[12] ^ D[14]; aux[10] = D[2] ^ D[3] ^ D[5] ^ D[6] ^ D[8] ^ D[13] ^ D[15]; aux[11] = D[2] ^ D[3] ^ D[4] ^ D[7] ^ D[9] ^ D[12] ^ D[14]; aux[12] = D[1] ^ D[2] ^ D[6] ^ D[7] ^ D[9] ^ D[11] ^ D[12]; aux[13] = D[0] ^ D[3] ^ D[6] ^ D[7] ^ D[8] ^ D[10] ^ D[13]; aux[14] = D[0] ^ D[3] ^ D[4] ^ D[5] ^ D[9] ^ D[11] ^ D[14]; aux[15] = D[1] ^ D[2] ^ D[4] ^ D[5] ^ D[8] ^ D[10] ^ D[15]; //put the result into plaintext registers #pragma unroll for (int i = 0; i < 16; i++) { D[i] = aux[i]; } } template <unsigned int keySize> __global__ void Decrypt(uint8* cipherText, unsigned long textSize, uint8* dk, uint8* SB_gmem, uint8* IV) { unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; uint8 cipherTextR_1[16];//registers keeping the cipherText. uint8 cipherTextR_2[16];//registers keeping the cipherText. __shared__ uint8 keySmem[272];//each round key is 16 bytes, there are 17 round keys 272 bytes __shared__ uint8 SB1[256]; __shared__ uint8 SB2[256]; __shared__ uint8 SB3[256]; __shared__ uint8 SB4[256]; //Load decryption round keys to shared memory. keySmem[tid] = dk[tid]; //rest of the bytes(272 bytes) are loaded by first 16 threads. if (tid < 16) { keySmem[256 + tid] = dk[256 + tid]; } //Load SB tables to shared memory.(1024 bytes loaded by 256 threads) SB1[tid] = SB_gmem[tid]; SB2[tid] = SB_gmem[tid + 256]; SB3[tid] = SB_gmem[tid + 512]; SB4[tid] = SB_gmem[tid + 768]; //Load the plaintext to registers //Each thread is responsible for 16 bytes. if(idx == 0)//first thread is different than other threads.Uses IV. { for (int i = 0; i < 16; i++) { cipherTextR_1[i] = IV[i]; } } else { for (int i = 0; i < 16; i++) { cipherTextR_1[i] = cipherText[16 * idx -16 + i];//non-coalasced access, slow } } for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherText[16 * idx + i];//non-coalasced access, slow } __syncthreads(); if (keySize == 16)//128-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... 
FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4);//...ek11 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[176 + i];//ek12 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[192 + i];//ek13 } //XOR with the previous block. #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } else if (keySize == 24)//192-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[192], SB1, SB2, SB3, SB4);//ek13 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[208 + i];//ek14 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[224 + i];//ek15 } //XOR with the previous block. 
#pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } else//256-bit keys { F0_d(cipherTextR_2, &keySmem[0], SB1, SB2, SB3, SB4);//ek1... FE_d(cipherTextR_2, &keySmem[16], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[32], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[48], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[64], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[80], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[96], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[112], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[128], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[144], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[160], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[176], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[192], SB1, SB2, SB3, SB4); FE_d(cipherTextR_2, &keySmem[208], SB1, SB2, SB3, SB4); F0_d(cipherTextR_2, &keySmem[224], SB1, SB2, SB3, SB4);//ek15 #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[240 + i];//ek16 } cipherTextR_2[0] = SB3[cipherTextR_2[0]]; cipherTextR_2[1] = SB4[cipherTextR_2[1]]; cipherTextR_2[2] = SB1[cipherTextR_2[2]]; cipherTextR_2[3] = SB2[cipherTextR_2[3]]; cipherTextR_2[4] = SB3[cipherTextR_2[4]]; cipherTextR_2[5] = SB4[cipherTextR_2[5]]; cipherTextR_2[6] = SB1[cipherTextR_2[6]]; cipherTextR_2[7] = SB2[cipherTextR_2[7]]; cipherTextR_2[8] = SB3[cipherTextR_2[8]]; cipherTextR_2[9] = SB4[cipherTextR_2[9]]; cipherTextR_2[10] = SB1[cipherTextR_2[10]]; cipherTextR_2[11] = SB2[cipherTextR_2[11]]; cipherTextR_2[12] = SB3[cipherTextR_2[12]]; cipherTextR_2[13] = SB4[cipherTextR_2[13]]; cipherTextR_2[14] = SB1[cipherTextR_2[14]]; cipherTextR_2[15] = SB2[cipherTextR_2[15]]; #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ keySmem[256 + i];//ek17 } //XOR with the previous block. #pragma unroll for (int i = 0; i < 16; i++) { cipherTextR_2[i] = cipherTextR_2[i] ^ cipherTextR_1[i]; } //Write back to global memory for (int i = 0; i < 16; i++) { cipherText[16 * idx + i] = cipherTextR_2[i]; } } } int main(void) { /////////INPUT PART BEGIN////////////////////// //Device pointers: uint8* deviceArr, *dk_d, *SB_dev, *IV_d; FILE *file; uint8* inputText;//ciphertext input unsigned long int fileLen, textSize; uint8 numOfRounds; const uint8 keySize = 32; uint8 key[32] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f}; uint8 IV[16] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; file = fopen("../input.txt", "r"); if (file) { char buf[2]; fseek(file, 0, SEEK_END); fileLen = ftell(file); fseek(file, 0, SEEK_SET); textSize = fileLen / 2; inputText = (uint8*)malloc(textSize); for (int i = 0; i < textSize; i++) { buf[0] = fgetc(file); buf[1] = fgetc(file); uint8 hexVal = (uint8)strtol(buf, NULL, 16); inputText[i] = hexVal; } } else { printf("File not found.\n"); return -1; } /////////INPUT PART END////////////////////// if (keySize == 16) numOfRounds = 13; else if (keySize == 24) numOfRounds = 15; else numOfRounds = 17; uint8 KL[16];//KL = leftmost 16 bytes of key uint8 KR[16];//KR = rightmost 16 bytes of key /* Most significant byte is stored in 0th index. 
KL = leftmost 16 bytes of key KR = rightmost 16 bytes of key */ for (int i = 0; i < 16; i++) { KL[i] = key[i]; } for (int i = 0; i < 16; i++) { KR[i] = key[i + 16]; } uint8* CK1, *CK2, *CK3; if (keySize == 16) { CK1 = C1; CK2 = C2; CK3 = C3; } else if (keySize == 24) { CK1 = C2; CK2 = C3; CK3 = C1; } else { CK1 = C3; CK2 = C1; CK3 = C2; } //Calculate round key generators W0,W1,W2,W3 uint8* W0 = KL; uint8 W1[16]; uint8 W2[16]; uint8 W3[16]; uint8 Fres[16];//auxilary array /* W0, W1, W2, W3 are calculated only once and used for all blocks. Since the key data W0 and CK1 are small enough this key generators are calculated in CPU. W1 needed for calc of W2, W2 needed for calc of W3. F0 and FE are also used in the encryption process. */ F0(W0, CK1, Fres); XOR_16(Fres, KR, W1); FE(W1, CK2, Fres); XOR_16(Fres, W0, W2); F0(W2, CK3, Fres); XOR_16(Fres, W1, W3); GenerateRoundKeys(W0, W1, W2, W3); /* Because each thread will process 16 bytes we need textSize/16 threads in total. Then thread number per block is: ceil(textSize/(16*blockSize)) bytes. To decide blockSize we must consider the main occupancy limiter, in this case number of registers per SM. Based on NVIDIA's programming guide Number of 32-bit registers per multiprocessor for compute capability >= 5.0 is 64K. In this code 16 registers used for plaintext, 16 registers auxilary, +1 by itself, each thread uses 33 registers. Then blocksize must be smaller than 64k/33. And larger than 272 since first 272 threads loads the shared memory. 512, 1024 are available blockSizes. 256 can also be tried but number of threads loading the shared memory must be decreased. Keeping the round keys in registers results in low number of warps per SM therefore poor performance. */ int blockSize = 256; int numOfBlocks = ceil((float)(textSize) / (16 * blockSize)); //Decryption round keys are derived from the encryption round keys which is generated by GenerateRoundKeys. GenerateDecRoundKeys(numOfRounds); uint8* resPlainText = (uint8*)malloc(textSize); cudaMalloc((void**)& deviceArr, textSize); cudaMalloc((void**)& dk_d, 272); cudaMalloc((void**)& IV_d, 16); cudaMalloc((void**)& SB_dev, 1024); //START TIMER. using namespace std::chrono; high_resolution_clock::time_point start = high_resolution_clock::now(); cudaMemcpy(deviceArr, inputText, textSize, cudaMemcpyHostToDevice); cudaMemcpy(dk_d, dk, 272, cudaMemcpyHostToDevice); cudaMemcpy(IV_d, IV, 16, cudaMemcpyHostToDevice); //Move Substition layer tables to global memory.(will be moved to shared memory in the kernel.) cudaMemcpy(SB_dev, SB1, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 256, SB2, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 512, SB3, 256, cudaMemcpyHostToDevice); cudaMemcpy(SB_dev + 768, SB4, 256, cudaMemcpyHostToDevice); Decrypt<keySize> << <numOfBlocks, blockSize >> > (deviceArr, textSize, dk_d, SB_dev ,IV_d); cudaMemcpy(resPlainText, deviceArr, textSize, cudaMemcpyDeviceToHost); //END TIMER; PRINT ELAPSED TIME. high_resolution_clock::time_point end = high_resolution_clock::now(); duration<double> timeElapsed = duration_cast<duration<double>>(end - start); std::cout << "Time elapsed: " << timeElapsed.count() << std::endl; //Print/write to file FILE *f = fopen("output.txt", "w"); for (int i = 0; i < textSize; i++) { fprintf(f, "%02x", resPlainText[i]); } fclose(f); //free cudaFree(deviceArr); cudaFree(dk_d); free(resPlainText); return 0; }
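Editorial note: both the HIP and CUDA files above parallelize CBC-mode decryption one 16-byte block per thread. That works because, unlike CBC encryption, each plaintext block P_i = D_K(C_i) XOR C_{i-1} depends only on two ciphertext blocks that are already known — which is exactly what the kernel does by loading cipherTextR_1 (the previous ciphertext block, or the IV for thread 0) and XOR-ing it in after the inverse rounds. A hedged serial reference of that recurrence, using the uint8 typedef from the files above (decrypt_block is a stand-in name for the 17-round inverse cipher implemented above, not a function from the files):

// Serial CBC decryption: each iteration is independent of the previous iteration's
// *output*, which is what lets the GPU version assign one ciphertext block per thread.
extern void decrypt_block(const uint8* in, uint8* out);  // placeholder for the ARIA inverse rounds above

void cbc_decrypt_serial(const uint8* ct, uint8* pt, unsigned long nBlocks, const uint8 IV[16])
{
    uint8 tmp[16];
    for (unsigned long i = 0; i < nBlocks; i++) {
        decrypt_block(&ct[16 * i], tmp);                        // D_K(C_i)
        const uint8* prev = (i == 0) ? IV : &ct[16 * (i - 1)];  // C_{i-1}, or the IV for the first block
        for (int j = 0; j < 16; j++) {
            pt[16 * i + j] = tmp[j] ^ prev[j];                  // P_i = D_K(C_i) ^ C_{i-1}
        }
    }
}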
581587d690b6a03b213030a8bad7ece9bf3050b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/hip/HIPContext.h> #include <ATen/hip/NumericLimits.cuh> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/ReduceOps.h> #include <c10/util/accumulate.h> #include <THH/THHGeneral.h> #include <THH/THHNumerics.cuh> #include <ATen/hip/cub.cuh> namespace at { namespace native { template <typename integer> constexpr inline integer ceil_div(integer n, integer m) { return (n + m - 1) / m; } template<typename scalar_t, typename idx_t, typename BinaryOperation> __device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) { if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) { rhs = lhs; rhs_idx = lhs_idx; } } /* Perform an inclusive scan along the innermost dimension of a tensor. * * - num_rows is the size of the flattened outer dimensions; * - row_size is the size of the innermost dimension; * * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is * considered as having 'num_rows' rows of size 'row_size'. * Each thread block processes one or more sets of contiguous rows (processing multiple rows * per thread block is quicker than processing a single row, especially for short rows). */ template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_, int num_rows, int row_size, scalar_t init, BinaryFunction binary_op) { __shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x]; __shared__ int64_t ibuf[num_threads_y][2 * num_threads_x]; scalar_t* row_buf = vbuf[threadIdx.y]; int64_t* row_idx_buf = ibuf[threadIdx.y]; for (int block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { int row = block_row + threadIdx.y; const scalar_t *row_self = self_ + row * row_size; scalar_t *row_values = values_ + row * row_size; int64_t *row_indices = indices_ + row * row_size; scalar_t block_total = init; int64_t block_idx_final = 0; // Perform scan on one block at a time, keeping track of the total value of // all blocks processed so far. for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { // Load data into shared memory (two values per thread). int col1 = block_col + threadIdx.x; int col2 = block_col + num_threads_x + threadIdx.x; if (row < num_rows) { if (col1 < row_size) { row_buf[threadIdx.x] = row_self[col1]; row_idx_buf[threadIdx.x] = col1; } else { row_buf[threadIdx.x] = init; // No need to set the index here as the value in init will never be selected } if (col2 < row_size) { row_buf[num_threads_x + threadIdx.x] = row_self[col2]; row_idx_buf[num_threads_x + threadIdx.x] = col2; } else { row_buf[num_threads_x + threadIdx.x] = init; // No need to set the index here as the value in init will never be selected } // Add the total value of all previous blocks to the first value of this block. if (threadIdx.x == 0) { binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op); } } __syncthreads(); // Parallel reduction (up-sweep). 
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { if (row < num_rows && threadIdx.x < s) { int offset = (2 * threadIdx.x + 1) * d - 1; binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); } __syncthreads(); } // Down-sweep. for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { if (row < num_rows && threadIdx.x < s - 1) { int offset = 2 * (threadIdx.x + 1) * d - 1; binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); } __syncthreads(); } // Write back to output. if (row < num_rows) { if (col1 < row_size){ row_values[col1] = row_buf[threadIdx.x]; row_indices[col1] = row_idx_buf[threadIdx.x]; } if (col2 < row_size) { row_values[col2] = row_buf[num_threads_x + threadIdx.x]; row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x]; } } block_total = row_buf[2 * num_threads_x - 1]; block_idx_final = row_idx_buf[2 * num_threads_x - 1]; __syncthreads(); } } } /* Perform an inclusive scan along an outer dimension of a tensor. * * - num_orows is the size of the flattened outer dimensions; * - num_irows is the size of the flattened inner dimensions; * - row_size is the size of the dimension along which to compute the variance; * * The dimensions to the outside and inside of the specified dimension are considered as flattened. * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened * outer dimensions, which contains several "inner rows"). * Each thread processes a single inner row at a time. */ template<typename scalar_t, class BinaryFunction> __global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_, const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) { for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { scalar_t *self = self_ + orow * row_size * num_irows + irow; scalar_t *values = values_ + orow * row_size * num_irows + irow; int64_t *indices = indices_ + orow * row_size * num_irows + irow; scalar_t out = init; int64_t out_idx = 0; for (auto col = decltype(row_size){0}; col < row_size; ++col) { if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) { out = *self; out_idx = col; } *values = out; *indices = out_idx; self += num_irows; values += num_irows; indices += num_irows; } } } } void check_fits_in_unsigned(int64_t val, const char* name) { constexpr auto umax = std::numeric_limits<uint32_t>::max(); TORCH_CHECK( val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value"); } template<typename scalar_t, class BinaryFunction> __host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, int dim, scalar_t init, BinaryFunction binary_op) { int64_t row_size = self.size(dim); auto sizes = self.sizes(); // Treat all outer dimensions (i.e. dim_ < dim) as one. const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); // Treat all inner dimensions (i.e. dim > dimension) as one. 
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); //for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row, //make sure that input is not bigger than supported by uint32_t check_fits_in_unsigned(num_irows, "num_irows"); check_fits_in_unsigned(num_orows, "num_orows"); check_fits_in_unsigned(row_size, "row_size"); dim3 threads(::min(512, int(num_irows))); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim_with_indices<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), num_orows, num_irows, row_size, init, binary_op); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t, class BinaryFunction> __host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); // Treat all outer dimensions as a single dimension. int row_size = self.size(ndim - 1); int num_rows = self.numel() / row_size; dim3 threads(16, 32); dim3 grid(::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y)))); hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), num_rows, row_size, init, binary_op); C10_HIP_KERNEL_LAUNCH_CHECK(); } template<typename scalar_t, typename BinaryFunction> void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) { int64_t dim, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); Tensor self_ = self.contiguous(); Tensor values_ = values.contiguous(); Tensor indices_ = indices.contiguous(); bool copy_values = !values.is_contiguous(); bool copy_indices = !indices.is_contiguous(); if (dim == ndim - 1) { scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op); } else { scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op); } if (copy_values){ values.copy_(values_); } if (copy_indices){ indices.copy_(indices_); } } void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { TensorArg output_arg{ values, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ self, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummax_cuda", [&]() { scalar_t init = self.is_floating_point() ? 
(-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>()); }); } void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { TensorArg output_arg{ values, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ self, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummin_cuda", [&]() { scalar_t init = self.is_floating_point() ? std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>()); }); } // TODO: The implementation of `tensor_kernel_scan_outer_dim` and // `tensor_kernel_scan_innermost_dim` is similar to // `tensor_kernel_scan_outer_dim_with_indices` // `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to // remove the duplication. /* Perform an inclusive scan along an outer dimension of a tensor. * * - num_orows is the size of the flattened outer dimensions; * - num_irows is the size of the flattened inner dimensions; * - row_size is the size of the dimension along which to scan; * * The dimensions to the outside and inside of the specified dimension are considered as flattened. * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened * outer dimensions, which contains several "inner rows"). * Each thread processes a single inner row at a time. */ template<typename scalar_t, class BinaryOp> __global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_, const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, const scalar_t init, BinaryOp binary_op) { for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { scalar_t *src = src_ + orow * row_size * num_irows + irow; scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow; scalar_t acc = init; for (uint32_t col = 0; col < row_size; ++col) { acc = binary_op(acc, *src); *tgt = acc; src += num_irows; tgt += num_irows; } } } } /* Perform an inclusive scan along the innermost dimension of a tensor. * * - num_rows is the size of the flattened outer dimensions; * - row_size is the size of the innermost dimension; * * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is * considered as having 'num_rows' rows of size 'row_size'. * Each thread block processes one or more sets of contiguous rows (processing multiple rows * per thread block is quicker than processing a single row, especially for short rows). */ template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op){ for (uint32_t block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { uint32_t row = block_row + threadIdx.y; T block_total = init; T *row_src = src_ + row * row_size; T *row_tgt = tgt_ + row * row_size; // Perform scan on one block at a time, keeping track of the total value of // all blocks processed so far. 
for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { // Load data into shared memory (two values per thread). uint32_t col1 = block_col + threadIdx.x; uint32_t col2 = block_col + num_threads_x + threadIdx.x; if (row < num_rows) { if (col1 < row_size) { row_buf[threadIdx.x] = row_src[col1]; } else { row_buf[threadIdx.x] = init; } if (col2 < row_size) { row_buf[num_threads_x + threadIdx.x] = row_src[col2]; } else { row_buf[num_threads_x + threadIdx.x] = init; } // Add the total value of all previous blocks to the first value of this block. if (threadIdx.x == 0) { row_buf[0] = binary_op(row_buf[0], block_total); } } __syncthreads(); // Parallel reduction (up-sweep). for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { if (row < num_rows && threadIdx.x < s) { uint32_t offset = (2 * threadIdx.x + 1) * d - 1; row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); } __syncthreads(); } // Down-sweep. for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { if (row < num_rows && threadIdx.x < s - 1) { uint32_t offset = 2 * (threadIdx.x + 1) * d - 1; row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); } __syncthreads(); } // Write back to output. if (row < num_rows) { if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x]; if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x]; } block_total = row_buf[2 * num_threads_x - 1]; __syncthreads(); } } } template < typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type tensor_kernel_scan_innermost_dim( T* tgt_, T* src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op) { __shared__ T sbuf[num_threads_y][2 * num_threads_x]; T* row_buf = sbuf[threadIdx.y]; tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>( row_buf, tgt_, src_, num_rows, row_size, init, binary_op); } template < typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ typename std::enable_if<c10::is_complex<T>::value, void>::type tensor_kernel_scan_innermost_dim( T* tgt_, T* src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op) { // As we cannot directly initialize shared array for complex types // Reference: // `error: initializer not allowed for __shared__ variable` // We instead get the base scalar type and allocate twice number of // elements required of base type and reinterpret them as complex. using base_t = typename scalar_value_type<T>::type; __shared__ base_t sbuf[num_threads_y][4 * num_threads_x]; T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]); tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>( row_buf, tgt_, src_, num_rows, row_size, init, binary_op); } template<typename scalar_t, class BinaryFunction> __host__ void scan_outer_dim(const Tensor& self, Tensor& result, int dim, scalar_t init, BinaryFunction binary_op) { const int64_t row_size = self.size(dim); auto sizes = self.sizes(); // Treat all outer dimensions (i.e. dim_ < dim) as one. const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); // Treat all inner dimensions (i.e. dim > dimension) as one. 
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); dim3 threads(::min(512, int(num_irows))); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; dim3 grid(::min(maxGridDim, num_orows), ::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); check_fits_in_unsigned(num_irows, "num_irows"); check_fits_in_unsigned(num_orows, "num_orows"); check_fits_in_unsigned(row_size, "row_size"); hipLaunchKernelGGL(( tensor_kernel_scan_outer_dim<scalar_t>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), num_orows, num_irows, row_size, init, binary_op); C10_HIP_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t, class BinaryFunction> void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) { int64_t ndim = self.dim(); // Treat all outer dimensions as a single dimension. int64_t row_size = self.size(ndim - 1); int64_t num_rows = self.numel() / row_size; dim3 threads(16, 32); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; dim3 grid(::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y}))); check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))"); check_fits_in_unsigned(row_size, "row_size"); hipLaunchKernelGGL(( tensor_kernel_scan_innermost_dim<scalar_t, 16, 32>), dim3(grid), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), num_rows, row_size, init, binary_op); C10_HIP_KERNEL_LAUNCH_CHECK(); } template<typename scalar_t, typename BinaryFunction> void scan_dim(const Tensor& self, const Tensor& result, int64_t dim, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); Tensor self_ = self.contiguous(); bool copy_result = !result.is_contiguous(); Tensor result_ = result.contiguous(); if (self.numel() == self.size(dim)) { cuda::cub::inclusive_scan(self_.data_ptr<scalar_t>(), result_.data_ptr<scalar_t>(), binary_op, self.numel()); } else if (dim == ndim - 1) { scan_innermost_dim<scalar_t>(self_, result_, init, binary_op); } else { scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op); } if (copy_result) { result.copy_(result_); } } Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) { result.resize_(self.sizes()); if (self.dim() == 0) { result.fill_(self); return result; } if (self.numel() == 0) { result.zero_(); return result; } auto wrap_dim = maybe_wrap_dim(dim, self.dim()); TensorArg output_arg{ result, "output", 1 }; TensorArg input_arg{ self, "input", 2 }; checkAllSameGPU(__func__, {output_arg, input_arg}); AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "logcumsumexp_cuda", [&]() { using accscalar_t = acc_type<scalar_t, true>; scalar_t init = -std::numeric_limits<scalar_t>::infinity(); auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t { scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan scalar_t max = at::_isnan(y) ? 
y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan if (min != max || ::isfinite(static_cast<accscalar_t>(min))) { // nan will be propagated here return ::log1p(::exp(min - max)) + max; } else { // special case to correctly handle infinite inputs return x; } }; scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp); }); return result; } Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) { Tensor result = at::empty_like(self, MemoryFormat::Contiguous); return _logcumsumexp_out_cuda(self, dim, result); } void cumsum_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumsum_cuda", [&]() { scalar_t init = 0; scan_dim<scalar_t>( self, result, dim, init, std::plus<scalar_t>()); }); } void cumprod_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() { scalar_t init = 1; scan_dim<scalar_t>( self, result, dim, init, std::multiplies<scalar_t>()); }); } REGISTER_DISPATCH(cumsum_stub, &cumsum_cuda_kernel); REGISTER_DISPATCH(cumprod_stub, &cumprod_cuda_kernel); }} // namespace at::native
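The with-indices scan kernels in the file above implement cummax/cummin semantics per row: an inclusive running extremum together with the index where it was attained, with a NaN winning once seen and ties (>=) moving the index forward, as in std::greater_equal. The minimal CPU sketch below restates that per-row rule for cummax as a reference for small inputs; the function name cummax_row and the sample row are made up for illustration.

#include <cmath>
#include <cstdio>
#include <vector>

// Mirrors the sequential outer-dim kernel: start from -inf (the float init used above),
// let NaN win and keep winning, and let ties (>=) advance the index.
void cummax_row(const std::vector<float>& row, std::vector<float>& vals, std::vector<long>& idxs) {
    float best = -INFINITY;
    long best_idx = 0;
    for (size_t c = 0; c < row.size(); ++c) {
        if (std::isnan(row[c]) || (!std::isnan(best) && row[c] >= best)) {
            best = row[c];
            best_idx = (long)c;
        }
        vals[c] = best;
        idxs[c] = best_idx;
    }
}

int main() {
    std::vector<float> row = {1.f, 3.f, 2.f, 3.f, 5.f};
    std::vector<float> vals(row.size());
    std::vector<long> idxs(row.size());
    cummax_row(row, vals, idxs);
    for (size_t c = 0; c < row.size(); ++c)
        std::printf("val=%g idx=%ld\n", vals[c], idxs[c]);   // 1/0, 3/1, 3/1, 3/3, 5/4
    return 0;
}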
581587d690b6a03b213030a8bad7ece9bf3050b4.cu
#include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/NumericLimits.cuh> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/TensorUtils.h> #include <ATen/NumericUtils.h> #include <ATen/native/Resize.h> #include <ATen/native/ReduceOps.h> #include <c10/util/accumulate.h> #include <THC/THCGeneral.h> #include <THC/THCNumerics.cuh> #include <ATen/cuda/cub.cuh> namespace at { namespace native { template <typename integer> constexpr inline integer ceil_div(integer n, integer m) { return (n + m - 1) / m; } template<typename scalar_t, typename idx_t, typename BinaryOperation> __device__ void binary_op_update(const scalar_t lhs, scalar_t& rhs, const idx_t lhs_idx, idx_t& rhs_idx, BinaryOperation binary_op) { if(!THCNumerics<scalar_t>::isnan(rhs) && (THCNumerics<scalar_t>::isnan(lhs) || !binary_op(rhs, lhs))) { rhs = lhs; rhs_idx = lhs_idx; } } /* Perform an inclusive scan along the innermost dimension of a tensor. * * - num_rows is the size of the flattened outer dimensions; * - row_size is the size of the innermost dimension; * * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is * considered as having 'num_rows' rows of size 'row_size'. * Each thread block processes one or more sets of contiguous rows (processing multiple rows * per thread block is quicker than processing a single row, especially for short rows). */ template<typename scalar_t, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ void tensor_kernel_scan_innermost_dim_with_indices(const scalar_t *self_, scalar_t *values_, int64_t *indices_, int num_rows, int row_size, scalar_t init, BinaryFunction binary_op) { __shared__ scalar_t vbuf[num_threads_y][2 * num_threads_x]; __shared__ int64_t ibuf[num_threads_y][2 * num_threads_x]; scalar_t* row_buf = vbuf[threadIdx.y]; int64_t* row_idx_buf = ibuf[threadIdx.y]; for (int block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { int row = block_row + threadIdx.y; const scalar_t *row_self = self_ + row * row_size; scalar_t *row_values = values_ + row * row_size; int64_t *row_indices = indices_ + row * row_size; scalar_t block_total = init; int64_t block_idx_final = 0; // Perform scan on one block at a time, keeping track of the total value of // all blocks processed so far. for (int block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { // Load data into shared memory (two values per thread). int col1 = block_col + threadIdx.x; int col2 = block_col + num_threads_x + threadIdx.x; if (row < num_rows) { if (col1 < row_size) { row_buf[threadIdx.x] = row_self[col1]; row_idx_buf[threadIdx.x] = col1; } else { row_buf[threadIdx.x] = init; // No need to set the index here as the value in init will never be selected } if (col2 < row_size) { row_buf[num_threads_x + threadIdx.x] = row_self[col2]; row_idx_buf[num_threads_x + threadIdx.x] = col2; } else { row_buf[num_threads_x + threadIdx.x] = init; // No need to set the index here as the value in init will never be selected } // Add the total value of all previous blocks to the first value of this block. if (threadIdx.x == 0) { binary_op_update(block_total, row_buf[0], block_idx_final, row_idx_buf[0], binary_op); } } __syncthreads(); // Parallel reduction (up-sweep). 
for (int s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { if (row < num_rows && threadIdx.x < s) { int offset = (2 * threadIdx.x + 1) * d - 1; binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); } __syncthreads(); } // Down-sweep. for (int s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { if (row < num_rows && threadIdx.x < s - 1) { int offset = 2 * (threadIdx.x + 1) * d - 1; binary_op_update(row_buf[offset], row_buf[offset + d], row_idx_buf[offset], row_idx_buf[offset + d], binary_op); } __syncthreads(); } // Write back to output. if (row < num_rows) { if (col1 < row_size){ row_values[col1] = row_buf[threadIdx.x]; row_indices[col1] = row_idx_buf[threadIdx.x]; } if (col2 < row_size) { row_values[col2] = row_buf[num_threads_x + threadIdx.x]; row_indices[col2] = row_idx_buf[num_threads_x + threadIdx.x]; } } block_total = row_buf[2 * num_threads_x - 1]; block_idx_final = row_idx_buf[2 * num_threads_x - 1]; __syncthreads(); } } } /* Perform an inclusive scan along an outer dimension of a tensor. * * - num_orows is the size of the flattened outer dimensions; * - num_irows is the size of the flattened inner dimensions; * - row_size is the size of the dimension along which to compute the variance; * * The dimensions to the outside and inside of the specified dimension are considered as flattened. * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened * outer dimensions, which contains several "inner rows"). * Each thread processes a single inner row at a time. */ template<typename scalar_t, class BinaryFunction> __global__ void tensor_kernel_scan_outer_dim_with_indices(scalar_t *self_, scalar_t *values_, int64_t *indices_, const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, scalar_t init, BinaryFunction binary_op) { for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { scalar_t *self = self_ + orow * row_size * num_irows + irow; scalar_t *values = values_ + orow * row_size * num_irows + irow; int64_t *indices = indices_ + orow * row_size * num_irows + irow; scalar_t out = init; int64_t out_idx = 0; for (auto col = decltype(row_size){0}; col < row_size; ++col) { if(THCNumerics<scalar_t>::isnan(*self) || (!THCNumerics<scalar_t>::isnan(out) && binary_op(*self, out))) { out = *self; out_idx = col; } *values = out; *indices = out_idx; self += num_irows; values += num_irows; indices += num_irows; } } } } void check_fits_in_unsigned(int64_t val, const char* name) { constexpr auto umax = std::numeric_limits<uint32_t>::max(); TORCH_CHECK( val >= 0 && val <= umax, name, " must fit in a 32-bit uint32_t value"); } template<typename scalar_t, class BinaryFunction> __host__ void scan_outer_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, int dim, scalar_t init, BinaryFunction binary_op) { int64_t row_size = self.size(dim); auto sizes = self.sizes(); // Treat all outer dimensions (i.e. dim_ < dim) as one. const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); // Treat all inner dimensions (i.e. dim > dimension) as one. 
const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); //for performance reasons, cuda kernels use uint32_t for loops over irows, orows and row, //make sure that input is not bigger than supported by uint32_t check_fits_in_unsigned(num_irows, "num_irows"); check_fits_in_unsigned(num_orows, "num_orows"); check_fits_in_unsigned(row_size, "row_size"); dim3 threads(std::min(512, int(num_irows))); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); tensor_kernel_scan_outer_dim_with_indices<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), num_orows, num_irows, row_size, init, binary_op); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t, class BinaryFunction> __host__ void scan_innermost_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); // Treat all outer dimensions as a single dimension. int row_size = self.size(ndim - 1); int num_rows = self.numel() / row_size; dim3 threads(16, 32); dim3 grid(std::min(at::cuda::getCurrentDeviceProperties()->maxGridSize[0], ceil_div(num_rows, int(threads.y)))); tensor_kernel_scan_innermost_dim_with_indices<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( self.data_ptr<scalar_t>(), values.data_ptr<scalar_t>(), indices.data_ptr<int64_t>(), num_rows, row_size, init, binary_op); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template<typename scalar_t, typename BinaryFunction> void scan_dim_with_indices(const Tensor& self, Tensor& values, Tensor& indices, //int64_t dim) { int64_t dim, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); Tensor self_ = self.contiguous(); Tensor values_ = values.contiguous(); Tensor indices_ = indices.contiguous(); bool copy_values = !values.is_contiguous(); bool copy_indices = !indices.is_contiguous(); if (dim == ndim - 1) { scan_innermost_dim_with_indices<scalar_t>(self_, values_, indices_, init, binary_op); } else { scan_outer_dim_with_indices<scalar_t>(self_, values_, indices_, dim, init, binary_op); } if (copy_values){ values.copy_(values_); } if (copy_indices){ indices.copy_(indices_); } } void cummax_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { TensorArg output_arg{ values, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ self, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummax_cuda", [&]() { scalar_t init = self.is_floating_point() ? (-1*std::numeric_limits<scalar_t>::infinity()) : std::numeric_limits<scalar_t>::lowest(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::greater_equal<scalar_t>()); }); } void cummin_helper_cuda(const Tensor& self, Tensor& values, Tensor& indices, int64_t dim) { TensorArg output_arg{ values, "output", 1 }; TensorArg indices_arg{ indices, "indices", 2 }; TensorArg input_arg{ self, "input", 3 }; checkAllSameGPU(__func__, {output_arg, indices_arg, input_arg}); AT_DISPATCH_ALL_TYPES_AND3(at::ScalarType::Bool, at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "cummin_cuda", [&]() { scalar_t init = self.is_floating_point() ? 
std::numeric_limits<scalar_t>::infinity() : std::numeric_limits<scalar_t>::max(); scan_dim_with_indices<scalar_t>(self, values, indices, dim, init, std::less_equal<scalar_t>()); }); } // TODO: The implementation of `tensor_kernel_scan_outer_dim` and // `tensor_kernel_scan_innermost_dim` is similar to // `tensor_kernel_scan_outer_dim_with_indices` // `tensor_kernel_scan_outer_dim_with_indices` and should be refactored to // remove the duplication. /* Perform an inclusive scan along an outer dimension of a tensor. * * - num_orows is the size of the flattened outer dimensions; * - num_irows is the size of the flattened inner dimensions; * - row_size is the size of the dimension along which to scan; * * The dimensions to the outside and inside of the specified dimension are considered as flattened. * Thread blocks with the same blockIdx.y process an "outer row" (i.e. an element of the flattened * outer dimensions, which contains several "inner rows"). * Each thread processes a single inner row at a time. */ template<typename scalar_t, class BinaryOp> __global__ void tensor_kernel_scan_outer_dim(scalar_t *tgt_, scalar_t *src_, const uint32_t num_orows, const uint32_t num_irows, const uint32_t row_size, const scalar_t init, BinaryOp binary_op) { for (uint32_t orow = blockIdx.x; orow < num_orows; orow += gridDim.x) { for (uint32_t irow = blockIdx.y * blockDim.x + threadIdx.x; irow < num_irows; irow += gridDim.y * blockDim.x) { scalar_t *src = src_ + orow * row_size * num_irows + irow; scalar_t *tgt = tgt_ + orow * row_size * num_irows + irow; scalar_t acc = init; for (uint32_t col = 0; col < row_size; ++col) { acc = binary_op(acc, *src); *tgt = acc; src += num_irows; tgt += num_irows; } } } } /* Perform an inclusive scan along the innermost dimension of a tensor. * * - num_rows is the size of the flattened outer dimensions; * - row_size is the size of the innermost dimension; * * The outer dimensions of the tensor are considered as a single dimension, i.e. the tensor is * considered as having 'num_rows' rows of size 'row_size'. * Each thread block processes one or more sets of contiguous rows (processing multiple rows * per thread block is quicker than processing a single row, especially for short rows). */ template<typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __device__ void tensor_kernel_scan_innermost_dim_impl(T* row_buf, T *tgt_, T *src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op){ for (uint32_t block_row = blockIdx.x * blockDim.y; block_row < num_rows; block_row += blockDim.y * gridDim.x) { uint32_t row = block_row + threadIdx.y; T block_total = init; T *row_src = src_ + row * row_size; T *row_tgt = tgt_ + row * row_size; // Perform scan on one block at a time, keeping track of the total value of // all blocks processed so far. for (uint32_t block_col = 0; block_col < row_size; block_col += 2 * num_threads_x) { // Load data into shared memory (two values per thread). uint32_t col1 = block_col + threadIdx.x; uint32_t col2 = block_col + num_threads_x + threadIdx.x; if (row < num_rows) { if (col1 < row_size) { row_buf[threadIdx.x] = row_src[col1]; } else { row_buf[threadIdx.x] = init; } if (col2 < row_size) { row_buf[num_threads_x + threadIdx.x] = row_src[col2]; } else { row_buf[num_threads_x + threadIdx.x] = init; } // Add the total value of all previous blocks to the first value of this block. if (threadIdx.x == 0) { row_buf[0] = binary_op(row_buf[0], block_total); } } __syncthreads(); // Parallel reduction (up-sweep). 
for (uint32_t s = num_threads_x, d = 1; s >= 1; s >>= 1, d <<= 1) { if (row < num_rows && threadIdx.x < s) { uint32_t offset = (2 * threadIdx.x + 1) * d - 1; row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); } __syncthreads(); } // Down-sweep. for (uint32_t s = 2, d = num_threads_x / 2; d >= 1; s <<= 1, d >>= 1) { if (row < num_rows && threadIdx.x < s - 1) { uint32_t offset = 2 * (threadIdx.x + 1) * d - 1; row_buf[offset + d] = binary_op(row_buf[offset], row_buf[offset + d]); } __syncthreads(); } // Write back to output. if (row < num_rows) { if (col1 < row_size) row_tgt[col1] = row_buf[threadIdx.x]; if (col2 < row_size) row_tgt[col2] = row_buf[num_threads_x + threadIdx.x]; } block_total = row_buf[2 * num_threads_x - 1]; __syncthreads(); } } } template < typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ typename std::enable_if<!c10::is_complex<T>::value, void>::type tensor_kernel_scan_innermost_dim( T* tgt_, T* src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op) { __shared__ T sbuf[num_threads_y][2 * num_threads_x]; T* row_buf = sbuf[threadIdx.y]; tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>( row_buf, tgt_, src_, num_rows, row_size, init, binary_op); } template < typename T, int num_threads_x, int num_threads_y, class BinaryFunction> __global__ typename std::enable_if<c10::is_complex<T>::value, void>::type tensor_kernel_scan_innermost_dim( T* tgt_, T* src_, const uint32_t num_rows, const uint32_t row_size, T init, BinaryFunction binary_op) { // As we cannot directly initialize shared array for complex types // Reference: // `error: initializer not allowed for __shared__ variable` // We instead get the base scalar type and allocate twice number of // elements required of base type and reinterpret them as complex. using base_t = typename scalar_value_type<T>::type; __shared__ base_t sbuf[num_threads_y][4 * num_threads_x]; T* row_buf = reinterpret_cast<T*>(sbuf[threadIdx.y]); tensor_kernel_scan_innermost_dim_impl<T, num_threads_x, num_threads_y>( row_buf, tgt_, src_, num_rows, row_size, init, binary_op); } template<typename scalar_t, class BinaryFunction> __host__ void scan_outer_dim(const Tensor& self, Tensor& result, int dim, scalar_t init, BinaryFunction binary_op) { const int64_t row_size = self.size(dim); auto sizes = self.sizes(); // Treat all outer dimensions (i.e. dim_ < dim) as one. const int64_t num_orows = c10::multiply_integers(sizes.begin(), sizes.begin() + dim); // Treat all inner dimensions (i.e. dim > dimension) as one. const int64_t num_irows = c10::multiply_integers(sizes.begin() + dim + 1, sizes.end()); dim3 threads(std::min(512, int(num_irows))); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[1]; dim3 grid(std::min(maxGridDim, num_orows), std::min(maxGridDim, ceil_div(num_irows, int64_t{threads.x}))); check_fits_in_unsigned(num_irows, "num_irows"); check_fits_in_unsigned(num_orows, "num_orows"); check_fits_in_unsigned(row_size, "row_size"); tensor_kernel_scan_outer_dim<scalar_t><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), num_orows, num_irows, row_size, init, binary_op); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template <typename scalar_t, class BinaryFunction> void scan_innermost_dim(const Tensor& self, Tensor& result, scalar_t init, BinaryFunction binary_op) { int64_t ndim = self.dim(); // Treat all outer dimensions as a single dimension. 
int64_t row_size = self.size(ndim - 1); int64_t num_rows = self.numel() / row_size; dim3 threads(16, 32); int64_t maxGridDim = at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; dim3 grid(std::min(maxGridDim, ceil_div(num_rows, int64_t{threads.y}))); check_fits_in_unsigned(num_rows, "Number of rows (self.numel()/self.size(self.dim()-1))"); check_fits_in_unsigned(row_size, "row_size"); tensor_kernel_scan_innermost_dim<scalar_t, 16, 32><<<grid, threads, 0, at::cuda::getCurrentCUDAStream()>>>( result.data_ptr<scalar_t>(), self.data_ptr<scalar_t>(), num_rows, row_size, init, binary_op); C10_CUDA_KERNEL_LAUNCH_CHECK(); } template<typename scalar_t, typename BinaryFunction> void scan_dim(const Tensor& self, const Tensor& result, int64_t dim, scalar_t init, BinaryFunction binary_op) { int ndim = self.dim(); Tensor self_ = self.contiguous(); bool copy_result = !result.is_contiguous(); Tensor result_ = result.contiguous(); if (self.numel() == self.size(dim)) { cuda::cub::inclusive_scan(self_.data_ptr<scalar_t>(), result_.data_ptr<scalar_t>(), binary_op, self.numel()); } else if (dim == ndim - 1) { scan_innermost_dim<scalar_t>(self_, result_, init, binary_op); } else { scan_outer_dim<scalar_t>(self_, result_, dim, init, binary_op); } if (copy_result) { result.copy_(result_); } } Tensor& _logcumsumexp_out_cuda(const Tensor& self, int64_t dim, Tensor& result) { result.resize_(self.sizes()); if (self.dim() == 0) { result.fill_(self); return result; } if (self.numel() == 0) { result.zero_(); return result; } auto wrap_dim = maybe_wrap_dim(dim, self.dim()); TensorArg output_arg{ result, "output", 1 }; TensorArg input_arg{ self, "input", 2 }; checkAllSameGPU(__func__, {output_arg, input_arg}); AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "logcumsumexp_cuda", [&]() { using accscalar_t = acc_type<scalar_t, true>; scalar_t init = -std::numeric_limits<scalar_t>::infinity(); auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x, const scalar_t y) -> scalar_t { scalar_t min = at::_isnan(y) ? y : std::min<scalar_t>(x,y); //std::min returns first arg if one of the args is nan scalar_t max = at::_isnan(y) ? y : std::max<scalar_t>(x,y); //std::max returns first arg if one of the args is nan if (min != max || ::isfinite(static_cast<accscalar_t>(min))) { // nan will be propagated here return ::log1p(std::exp(min - max)) + max; } else { // special case to correctly handle infinite inputs return x; } }; scan_dim<scalar_t>(self, result, wrap_dim, init, log_add_exp); }); return result; } Tensor _logcumsumexp_cuda(const Tensor& self, int64_t dim) { Tensor result = at::empty_like(self, MemoryFormat::Contiguous); return _logcumsumexp_out_cuda(self, dim, result); } void cumsum_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumsum_cuda", [&]() { scalar_t init = 0; scan_dim<scalar_t>( self, result, dim, init, std::plus<scalar_t>()); }); } void cumprod_cuda_kernel(const Tensor& result, const Tensor& self, int64_t dim) { AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2( ScalarType::Half, ScalarType::BFloat16, self.scalar_type(), "cumprod_cuda", [&]() { scalar_t init = 1; scan_dim<scalar_t>( self, result, dim, init, std::multiplies<scalar_t>()); }); } REGISTER_DISPATCH(cumsum_stub, &cumsum_cuda_kernel); REGISTER_DISPATCH(cumprod_stub, &cumprod_cuda_kernel); }} // namespace at::native
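The log_add_exp lambda in _logcumsumexp_out_cuda above relies on the identity log(exp(x)+exp(y)) = max(x,y) + log1p(exp(min(x,y)-max(x,y))), which avoids overflow, plus a special case when both arguments are the same infinity. The host-side restatement below is only a sketch of that identity: it omits the at::_isnan bookkeeping of the original lambda, and log_add_exp_ref is a made-up name.

#include <algorithm>
#include <cmath>
#include <cstdio>

double log_add_exp_ref(double x, double y) {
    double mn = std::min(x, y);
    double mx = std::max(x, y);
    if (mn != mx || std::isfinite(mn)) {
        return std::log1p(std::exp(mn - mx)) + mx;   // stable form of log(exp(x)+exp(y))
    }
    return x;   // both +inf or both -inf: exp(mn - mx) would give nan, so return x directly
}

int main() {
    std::printf("%f\n", log_add_exp_ref(1000.0, 1000.0));        // ~1000.693147, no overflow
    std::printf("%f\n", log_add_exp_ref(-INFINITY, -INFINITY));  // stays -inf
    return 0;
}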
f36e6243f7c57adbee4805b77803d701942f2df2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <algorithm> #include <hip/hip_fp16.h> #include <cassert> #include "NonZero.hpp" //sds: for this split, index here must be 0, i.e. there is only one output //sds: index,The index of the output tensor. //sds: only this function is executed when the engine is built; do not rely on the other functions nvinfer1::Dims NonZeroPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { // 'equal' and 'less' is 2, 'where' is 3 assert(nbInputs == 1); assert(index == 0);// only one output nvinfer1::Dims const& input_dims = inputDims[0]; //output_dims == input_dims[0] //nvinfer1::Dims output_dims(input_dims.nbDims, _numbers); //sds: every element of the input must be non-zero (in practice the previous layer is ConstantOfShape, so every element is 1); under that condition the output is exactly rows*numbers _rows = input_dims.nbDims; _numbers = 1; for(int i = 0; i <_rows ;i++) { _numbers *= input_dims.d[i]; } nvinfer1::Dims output_dims; output_dims.nbDims=2; output_dims.d[0]=_rows; output_dims.d[1]=_numbers; output_dims.type[0] = nvinfer1::DimensionType::kCHANNEL; for( int i=1; i<output_dims.nbDims; ++i ) { output_dims.type[i] = nvinfer1::DimensionType::kSPATIAL; } return output_dims; } //sds: this function is executed at run time; it is not executed when the engine is built int NonZeroPlugin::initialize() { nvinfer1::Dims dims = this->getInputDims(0); //_rows = dims.nbDims; //_numbers = 1; int* hLensOfDim = new int[_rows]; int* hmulOfSon = new int[_rows]; for( int i=0; i<=dims.nbDims-1; i++ ) { //_numbers *= dims.d[i]; hLensOfDim[i]= dims.d[i]; } hmulOfSon[_rows-1]=1; for( int i=_rows-2; i>=0; i-- ) { hmulOfSon[i]=hmulOfSon[i+1] * hLensOfDim[i+1]; } int length= sizeof(int) * _rows; CHECK_CUDA(hipMalloc((void**)&_lensOfDim, length)); CHECK_CUDA(hipMemcpy(_lensOfDim, hLensOfDim, length, hipMemcpyHostToDevice)); CHECK_CUDA(hipMalloc((void**)&_mulOfSon, length)); CHECK_CUDA(hipMemcpy(_mulOfSon, hmulOfSon, length, hipMemcpyHostToDevice)); delete []hLensOfDim; delete []hmulOfSon; return 0; } template<typename T> __global__ void non_zero_kernel(const int columns, int* _lenOfDim, int* _mulOfSon,T * __restrict__ y) { int x_index = blockIdx.x * blockDim.x + threadIdx.x; if(x_index < columns) { int y_index = threadIdx.y; //sds: each element is assigned according to the rule below y[y_index*columns + x_index] = (T)((x_index/_mulOfSon[y_index]) % _lenOfDim[y_index]); } } //sds-temp: only the special case where all elements are 1 is supported; the output dims are determined when the engine is initialized. In that case the output follows a fixed pattern.
int NonZeroPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, hipStream_t stream) { //float const* idata1 = reinterpret_cast<float const*>(inputs[0]); float * odatas = reinterpret_cast<float *>(outputs[0]); //dims(x,y,z) dim3 block(512, _rows); dim3 grid( (_numbers + 512 - 1) / 512, 1); extern __shared__ int lensOfDim[]; hipLaunchKernelGGL(( non_zero_kernel), dim3(grid), dim3(block), 0, stream, _numbers, _lensOfDim, _mulOfSon, odatas); gdb_copy_to_cpu("NonZero output", odatas, _numbers); return hipGetLastError() != hipSuccess; }
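When every input element is non-zero, the non_zero_kernel above writes into column x of output row r the r-th coordinate of flat index x, computed as (x / mulOfSon[r]) % lensOfDim[r]. The small CPU sketch below only illustrates that indexing rule; the 2x3x4 shape is made up for the example.

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> dims = {2, 3, 4};              // plays the role of lensOfDim
    std::vector<int> mulOfSon(dims.size());
    mulOfSon[dims.size() - 1] = 1;
    for (int i = (int)dims.size() - 2; i >= 0; --i)
        mulOfSon[i] = mulOfSon[i + 1] * dims[i + 1];

    int numbers = 1;
    for (int d : dims) numbers *= d;                 // 24 output columns

    for (size_t r = 0; r < dims.size(); ++r) {       // one output row per input dimension
        for (int x = 0; x < numbers; ++x)
            std::printf("%d ", (x / mulOfSon[r]) % dims[r]);   // coordinate r of flat index x
        std::printf("\n");
    }
    return 0;
}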
f36e6243f7c57adbee4805b77803d701942f2df2.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */ #include <algorithm> #include <cuda_fp16.h> #include <cassert> #include "NonZero.hpp" //sds: for this split, index here must be 0, i.e. there is only one output //sds: index,The index of the output tensor. //sds: only this function is executed when the engine is built; do not rely on the other functions nvinfer1::Dims NonZeroPlugin::getOutputDimensions(int index, const nvinfer1::Dims *inputDims, int nbInputs) { // 'equal' and 'less' is 2, 'where' is 3 assert(nbInputs == 1); assert(index == 0);// only one output nvinfer1::Dims const& input_dims = inputDims[0]; //output_dims == input_dims[0] //nvinfer1::Dims output_dims(input_dims.nbDims, _numbers); //sds: every element of the input must be non-zero (in practice the previous layer is ConstantOfShape, so every element is 1); under that condition the output is exactly rows*numbers _rows = input_dims.nbDims; _numbers = 1; for(int i = 0; i <_rows ;i++) { _numbers *= input_dims.d[i]; } nvinfer1::Dims output_dims; output_dims.nbDims=2; output_dims.d[0]=_rows; output_dims.d[1]=_numbers; output_dims.type[0] = nvinfer1::DimensionType::kCHANNEL; for( int i=1; i<output_dims.nbDims; ++i ) { output_dims.type[i] = nvinfer1::DimensionType::kSPATIAL; } return output_dims; } //sds: this function is executed at run time; it is not executed when the engine is built int NonZeroPlugin::initialize() { nvinfer1::Dims dims = this->getInputDims(0); //_rows = dims.nbDims; //_numbers = 1; int* hLensOfDim = new int[_rows]; int* hmulOfSon = new int[_rows]; for( int i=0; i<=dims.nbDims-1; i++ ) { //_numbers *= dims.d[i]; hLensOfDim[i]= dims.d[i]; } hmulOfSon[_rows-1]=1; for( int i=_rows-2; i>=0; i-- ) { hmulOfSon[i]=hmulOfSon[i+1] * hLensOfDim[i+1]; } int length= sizeof(int) * _rows; CHECK_CUDA(cudaMalloc((void**)&_lensOfDim, length)); CHECK_CUDA(cudaMemcpy(_lensOfDim, hLensOfDim, length, cudaMemcpyHostToDevice)); CHECK_CUDA(cudaMalloc((void**)&_mulOfSon, length)); CHECK_CUDA(cudaMemcpy(_mulOfSon, hmulOfSon, length, cudaMemcpyHostToDevice)); delete []hLensOfDim; delete []hmulOfSon; return 0; } template<typename T> __global__ void non_zero_kernel(const int columns, int* _lenOfDim, int* _mulOfSon,T * __restrict__ y) { int x_index = blockIdx.x * blockDim.x + threadIdx.x; if(x_index < columns) { int y_index = threadIdx.y; //sds: each element is assigned according to the rule below y[y_index*columns + x_index] = (T)((x_index/_mulOfSon[y_index]) % _lenOfDim[y_index]); } } //sds-temp: only the special case where all elements are 1 is supported; the output dims are determined when the engine is initialized.
In that case the output follows a fixed pattern. int NonZeroPlugin::enqueue(int batchSize, const void *const *inputs, void **outputs, void *workspace, cudaStream_t stream) { //float const* idata1 = reinterpret_cast<float const*>(inputs[0]); float * odatas = reinterpret_cast<float *>(outputs[0]); //dims(x,y,z) dim3 block(512, _rows); dim3 grid( (_numbers + 512 - 1) / 512, 1); extern __shared__ int lensOfDim[]; non_zero_kernel<<<grid, block, 0, stream>>>(_numbers, _lensOfDim, _mulOfSon, odatas); gdb_copy_to_cpu("NonZero output", odatas, _numbers); return cudaGetLastError() != cudaSuccess; }
d4d2e81aad5a1c7d66317d02986a759e544af43a.hip
// !!! This is a file automatically generated by hipify!!! #include "solveMatrix.h" #include <hip/hip_runtime.h> #include "cusolverDn.h" double *cu_A; double *b; double *Workspace; int *devIpiv; __device__ __managed__ int devInfo; int die(cusolverStatus_t status, int devInfo, hipsolverDnHandle_t handle); int err(cusolverStatus_t status, int devInfo); void freeMem(); int solveMatrix(double *A_in, int n, double *b_in, double *x_out) { hipsolverDnHandle_t handle; cusolverStatus_t status; status = hipsolverDnCreate(&handle); if(status!=CUSOLVER_STATUS_SUCCESS) return 0; int Lwork; hipMallocManaged(&cu_A, (size_t) (n*n*sizeof(double))); hipMallocManaged(&b, (size_t) (n*sizeof(double))); hipMallocManaged(&devIpiv, (size_t) n*sizeof(int)); hipMemcpy(cu_A, A_in, n*n*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(b, b_in, n*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); status = hipsolverDnDgetrf_bufferSize(handle, n, n, cu_A, n, &Lwork ); hipDeviceSynchronize(); if(err(status,0)) return die(status, 0, handle); hipMallocManaged(&Workspace, (size_t) Lwork*sizeof(double)); status = hipsolverDnDgetrf(handle, n, n, cu_A, n, Workspace, devIpiv, &devInfo); hipDeviceSynchronize(); if(err(status,0)) return die(status, devInfo, handle); status = hipsolverDnDgetrs(handle, HIPBLAS_OP_T, n, 1, cu_A, n, devIpiv, b, n, &devInfo ); hipDeviceSynchronize(); if(err(status,0)) return die(status, devInfo, handle); hipMemcpy(x_out, b, n*sizeof(double), hipMemcpyDeviceToHost); hipsolverDnDestroy(handle); freeMem(); return 0; } int die(cusolverStatus_t status, int devInfo, hipsolverDnHandle_t handle) { hipsolverDnDestroy(handle); freeMem(); return err(status, devInfo); } int err(cusolverStatus_t status, int devInfo) { if(status!=CUSOLVER_STATUS_SUCCESS) return 1; if(devInfo!=0) return devInfo; return 0; } void freeMem() { hipFree(cu_A); hipFree(b); hipFree(devIpiv); }
d4d2e81aad5a1c7d66317d02986a759e544af43a.cu
#include "solveMatrix.h" #include <cuda_runtime.h> #include "cusolverDn.h" double *cu_A; double *b; double *Workspace; int *devIpiv; __device__ __managed__ int devInfo; int die(cusolverStatus_t status, int devInfo, cusolverDnHandle_t handle); int err(cusolverStatus_t status, int devInfo); void freeMem(); int solveMatrix(double *A_in, int n, double *b_in, double *x_out) { cusolverDnHandle_t handle; cusolverStatus_t status; status = cusolverDnCreate(&handle); if(status!=CUSOLVER_STATUS_SUCCESS) return 0; int Lwork; cudaMallocManaged(&cu_A, (size_t) (n*n*sizeof(double))); cudaMallocManaged(&b, (size_t) (n*sizeof(double))); cudaMallocManaged(&devIpiv, (size_t) n*sizeof(int)); cudaMemcpy(cu_A, A_in, n*n*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(b, b_in, n*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); status = cusolverDnDgetrf_bufferSize(handle, n, n, cu_A, n, &Lwork ); cudaDeviceSynchronize(); if(err(status,0)) return die(status, 0, handle); cudaMallocManaged(&Workspace, (size_t) Lwork*sizeof(double)); status = cusolverDnDgetrf(handle, n, n, cu_A, n, Workspace, devIpiv, &devInfo); cudaDeviceSynchronize(); if(err(status,0)) return die(status, devInfo, handle); status = cusolverDnDgetrs(handle, CUBLAS_OP_T, n, 1, cu_A, n, devIpiv, b, n, &devInfo ); cudaDeviceSynchronize(); if(err(status,0)) return die(status, devInfo, handle); cudaMemcpy(x_out, b, n*sizeof(double), cudaMemcpyDeviceToHost); cusolverDnDestroy(handle); freeMem(); return 0; } int die(cusolverStatus_t status, int devInfo, cusolverDnHandle_t handle) { cusolverDnDestroy(handle); freeMem(); return err(status, devInfo); } int err(cusolverStatus_t status, int devInfo) { if(status!=CUSOLVER_STATUS_SUCCESS) return 1; if(devInfo!=0) return devInfo; return 0; } void freeMem() { cudaFree(cu_A); cudaFree(b); cudaFree(devIpiv); }
e584cca8323ceb88107d9159def35bad3c0d8cc3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<cmath> #define TILE_SIZE 32 //Tile size and block size, both are taken as 32 __device__ void store_full(float*,float*,int,int,int); __device__ void load_full(float*,float*,int,int,int); __device__ void store_lower(float*,float*,int,int,int); __device__ void load_lower(float*,float*,int,int,int); __device__ void potrf_tile(float*); __device__ void trsm_tile(float*,int,int,int); __device__ void syrk_tile(float*,float*,int,int,int); __global__ void right_looking_launch_kernel(float*,int); __device__ void store_full(float* read_data,float* write_data,int i,int j,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = i*blockDim.x + threadIdx.x; write_data[global_y*N + global_x] = read_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y]; __syncthreads(); } __device__ void load_full(float* read_data,float* write_data,int i,int j,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = i*blockDim.x + threadIdx.x; write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = read_data[global_y*N + global_x]; __syncthreads(); } __device__ void store_lower(float* read_data,float* write_data,int i,int j,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = i*blockDim.x + threadIdx.x; if(threadIdx.y >= threadIdx.x) write_data[global_y*N + global_x] = read_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y]; else write_data[global_y*N + global_x] = 0.0; __syncthreads(); } __device__ void load_lower(float* read_data,float* write_data,int i,int j,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = i*blockDim.x + threadIdx.x; if(threadIdx.y >= threadIdx.x) write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = read_data[global_y*N + global_x]; else write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = 0.0; __syncthreads(); } __device__ void potrf_tile(float* t_A) { int t_x = threadIdx.x; int t_y = threadIdx.y; __shared__ float temp2; // Using shared memory to Optimize for(int k=0;k<TILE_SIZE;k++) { if(t_x==t_y && t_x==k) { t_A[k*(TILE_SIZE+1) + k] = sqrtf(t_A[k*(TILE_SIZE+1) + k]); temp2 = t_A[k*(TILE_SIZE+1) + k]; } __syncthreads(); if(t_x<t_y && t_x == k) { t_A[t_y*(TILE_SIZE+1) + k]/= temp2; } __syncthreads(); if(k<t_y && k<t_x && t_x<=t_y) { t_A[t_y*(TILE_SIZE+1) + t_x]-= t_A[t_x*(TILE_SIZE+1) + k]*t_A[t_y*(TILE_SIZE+1) + k]; } __syncthreads(); } } __device__ void trsm_tile(float *read_data,int i,int j,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = i*blockDim.x + threadIdx.x; int t_x = threadIdx.x; int t_y = threadIdx.y; for(int s=0;s<TILE_SIZE;s++) { if(t_x==s) { read_data[global_y*N + global_x]/= read_data[global_x*N + global_x]; } __syncthreads(); if(t_x > s) { read_data[global_y*N + global_x]-= read_data[global_x*N + global_x - t_x + s]*read_data[global_y*N + global_x - t_x + s]; } __syncthreads(); } } __device__ void syrk_tile(float* read_data,float* rA2,int i,int j,int k,int N) { int global_y = j*blockDim.y + threadIdx.y; int global_x = k*blockDim.x + threadIdx.x; int t_y = threadIdx.y; int t_x = threadIdx.x; __shared__ float temp0[TILE_SIZE][TILE_SIZE+1]; // Using shared memory to Optimize and Using TILE_SIZE+1 to avoid Band-conflict in Shared Memory __shared__ float temp1[TILE_SIZE][TILE_SIZE+1]; // Using shared memory to Optimize and Using TILE_SIZE+1 to avoid Band-conflict in Shared Memory temp0[t_y][t_x] = read_data[global_x*N + i*blockDim.x + t_y]; temp1[t_x][t_y] = 
read_data[global_y*N + i*blockDim.x + t_x]; __syncthreads(); float valueToSubtract = 0.0; for(int r=0;r<TILE_SIZE;r++) { valueToSubtract+= temp0[r][t_x]*temp1[r][t_y]; } rA2[t_y*(TILE_SIZE+1) + t_x]-= valueToSubtract; __syncthreads(); } __global__ void right_looking_launch_kernel(float* read_data,int N) { __shared__ float block_data[TILE_SIZE*(TILE_SIZE+1)]; // Using TILE_SIZE+1 to avoid Band-conflict in Shared Memory int i,j,k; for(i=0;i<N/TILE_SIZE;i++) { load_lower(read_data,block_data,i,i,N); potrf_tile(block_data); store_lower(block_data,read_data,i,i,N); for(j=i+1;j<N/TILE_SIZE;j++) { trsm_tile(read_data,i,j,N); for(k=i+1;k<((N/TILE_SIZE)-1);k++) { load_full(read_data,block_data,k,j,N); syrk_tile(read_data,block_data,i,j,k,N); store_full(block_data,read_data,k,j,N); } load_lower(read_data,block_data,k,j,N); syrk_tile(read_data,block_data,i,j,k,N); store_lower(block_data,read_data,k,j,N); } } }
e584cca8323ceb88107d9159def35bad3c0d8cc3.cu
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 32 // Tile size and block size, both are taken as 32
__device__ void store_full(float*,float*,int,int,int);
__device__ void load_full(float*,float*,int,int,int);
__device__ void store_lower(float*,float*,int,int,int);
__device__ void load_lower(float*,float*,int,int,int);
__device__ void potrf_tile(float*);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,float*,int,int,int,int);
__global__ void right_looking_launch_kernel(float*,int);
__device__ void store_full(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    write_data[global_y*N + global_x] = read_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y];
    __syncthreads();
}
__device__ void load_full(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = read_data[global_y*N + global_x];
    __syncthreads();
}
__device__ void store_lower(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    if(threadIdx.y >= threadIdx.x)
        write_data[global_y*N + global_x] = read_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y];
    else
        write_data[global_y*N + global_x] = 0.0;
    __syncthreads();
}
__device__ void load_lower(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    if(threadIdx.y >= threadIdx.x)
        write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = read_data[global_y*N + global_x];
    else
        write_data[threadIdx.x + (TILE_SIZE+1)*threadIdx.y] = 0.0;
    __syncthreads();
}
__device__ void potrf_tile(float* t_A)
{
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    __shared__ float temp2; // Using shared memory to optimize
    for(int k=0;k<TILE_SIZE;k++)
    {
        if(t_x==t_y && t_x==k)
        {
            t_A[k*(TILE_SIZE+1) + k] = sqrtf(t_A[k*(TILE_SIZE+1) + k]);
            temp2 = t_A[k*(TILE_SIZE+1) + k];
        }
        __syncthreads();
        if(t_x<t_y && t_x == k)
        {
            t_A[t_y*(TILE_SIZE+1) + k] /= temp2;
        }
        __syncthreads();
        if(k<t_y && k<t_x && t_x<=t_y)
        {
            t_A[t_y*(TILE_SIZE+1) + t_x] -= t_A[t_x*(TILE_SIZE+1) + k]*t_A[t_y*(TILE_SIZE+1) + k];
        }
        __syncthreads();
    }
}
__device__ void trsm_tile(float *read_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    for(int s=0;s<TILE_SIZE;s++)
    {
        if(t_x==s)
        {
            read_data[global_y*N + global_x] /= read_data[global_x*N + global_x];
        }
        __syncthreads();
        if(t_x > s)
        {
            read_data[global_y*N + global_x] -= read_data[global_x*N + global_x - t_x + s]*read_data[global_y*N + global_x - t_x + s];
        }
        __syncthreads();
    }
}
__device__ void syrk_tile(float* read_data,float* rA2,int i,int j,int k,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = k*blockDim.x + threadIdx.x;
    int t_y = threadIdx.y;
    int t_x = threadIdx.x;
    __shared__ float temp0[TILE_SIZE][TILE_SIZE+1]; // Using shared memory to optimize; TILE_SIZE+1 padding avoids bank conflicts in shared memory
    __shared__ float temp1[TILE_SIZE][TILE_SIZE+1]; // Using shared memory to optimize; TILE_SIZE+1 padding avoids bank conflicts in shared memory
    temp0[t_y][t_x] = read_data[global_x*N + i*blockDim.x + t_y];
    temp1[t_x][t_y] = read_data[global_y*N + i*blockDim.x + t_x];
    __syncthreads();
    float valueToSubtract = 0.0;
    for(int r=0;r<TILE_SIZE;r++)
    {
        valueToSubtract += temp0[r][t_x]*temp1[r][t_y];
    }
    rA2[t_y*(TILE_SIZE+1) + t_x] -= valueToSubtract;
    __syncthreads();
}
__global__ void right_looking_launch_kernel(float* read_data,int N)
{
    __shared__ float block_data[TILE_SIZE*(TILE_SIZE+1)]; // TILE_SIZE+1 padding avoids bank conflicts in shared memory
    int i,j,k;
    for(i=0;i<N/TILE_SIZE;i++)
    {
        load_lower(read_data,block_data,i,i,N);
        potrf_tile(block_data);
        store_lower(block_data,read_data,i,i,N);
        for(j=i+1;j<N/TILE_SIZE;j++)
        {
            trsm_tile(read_data,i,j,N);
            for(k=i+1;k<((N/TILE_SIZE)-1);k++)
            {
                load_full(read_data,block_data,k,j,N);
                syrk_tile(read_data,block_data,i,j,k,N);
                store_full(block_data,read_data,k,j,N);
            }
            load_lower(read_data,block_data,k,j,N);
            syrk_tile(read_data,block_data,i,j,k,N);
            store_lower(block_data,read_data,k,j,N);
        }
    }
}
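A minimal host-side sketch of how this kernel might be driven; it is not part of the original file. It assumes N is a multiple of TILE_SIZE, a single 32x32 thread block (the kernel itself walks over all tiles), and a row-major symmetric positive-definite matrix; the names cholesky_on_device, h_A and d_A are hypothetical.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void right_looking_launch_kernel(float*, int); // defined in the file above

int cholesky_on_device(float* h_A, int N) // hypothetical driver, h_A is N*N row-major
{
    float* d_A = nullptr;
    size_t bytes = (size_t)N * N * sizeof(float);
    if (cudaMalloc((void**)&d_A, bytes) != cudaSuccess) return -1;
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);

    dim3 block(32, 32); // TILE_SIZE x TILE_SIZE threads
    dim3 grid(1, 1);    // one block: the kernel loops over all tiles internally
    right_looking_launch_kernel<<<grid, block>>>(d_A, N);
    cudaDeviceSynchronize();

    cudaMemcpy(h_A, d_A, bytes, cudaMemcpyDeviceToHost); // lower triangle now holds the Cholesky factor
    cudaFree(d_A);
    return 0;
}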
0b2e525e28bf63535a971462000b29171deca63e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "hipcub/hipcub.hpp" #include "paddle/fluid/operators/bce_loss_op.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_launch_config.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void GPUBCELossForward(const T* x_data, const T* label_data, T* out_data, const int in_numel) { CUDA_KERNEL_LOOP(i, in_numel) { T x = x_data[i]; T label = label_data[i]; T one = static_cast<T>(1.); T neg_100 = static_cast<T>(-100.); T term1 = max(real_log(x), neg_100); T term2 = max(real_log(one - x), neg_100); out_data[i] = ((label - one) * term2) - (label * term1); } } template <typename T> __global__ void GPUBCELossBackward(const T* x_data, const T* label_data, const T* dout_data, T* dx_data, const int in_numel) { CUDA_KERNEL_LOOP(i, in_numel) { T x = x_data[i]; T label = label_data[i]; T dout = dout_data[i]; T one = static_cast<T>(1.); T eps = static_cast<T>(1e-12); T term1 = max((one - x) * x, eps); dx_data[i] = dout * (x - label) / term1; } } template <typename DeviceContext, typename T> class BCELossCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* labels = ctx.Input<Tensor>("Label"); auto* out = ctx.Output<Tensor>("Out"); auto x_data = x->data<T>(); auto out_data = out->mutable_data<T>(ctx.GetPlace()); auto x_numel = x->numel(); platform::GpuLaunchConfig config = platform::getGpuLaunchConfig(x_numel, ctx); Tensor x_cpu; framework::TensorCopy(*x, platform::CPUPlace(), &x_cpu); T* x_cpu_data = x_cpu.data<T>(); for (int64_t i = 0; i < x_numel; ++i) { PADDLE_ENFORCE_GE( x_cpu_data[i], static_cast<T>(0), platform::errors::InvalidArgument( "Illegal input, input must be greater than or equal to 0")); PADDLE_ENFORCE_LE( x_cpu_data[i], static_cast<T>(1), platform::errors::InvalidArgument( "Illegal input, input must be less than or equal to 1")); } auto& dev_ctx = ctx.cuda_device_context(); hipLaunchKernelGGL(( GPUBCELossForward< T>), dim3(config.blocks), dim3(config.threads), 0, dev_ctx.stream(), x_data, labels->data<T>(), out_data, x_numel); } }; template <typename DeviceContext, typename T> class BCELossGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* labels = ctx.Input<Tensor>("Label"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("X")); auto dx_data = dx->mutable_data<T>(ctx.GetPlace()); int x_numel = x->numel(); platform::GpuLaunchConfig config = platform::getGpuLaunchConfig(x_numel, ctx); auto& dev_ctx = ctx.cuda_device_context(); 
hipLaunchKernelGGL(( GPUBCELossBackward< T>), dim3(config.blocks), dim3(config.threads), 0, dev_ctx.stream(), x->data<T>(), labels->data<T>(), dout->data<T>(), dx_data, x_numel); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( bce_loss, ops::BCELossCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::BCELossCUDAKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( bce_loss_grad, ops::BCELossGradCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::BCELossGradCUDAKernel<paddle::platform::CUDADeviceContext, double>);
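For reference, the forward and backward kernels in this file compute the element-wise binary cross-entropy loss with the clamping visible in the code; restated below (this is a reading of the kernels above, not a quote from the Paddle documentation):

\[ \mathrm{out}_i = -\bigl( y_i\,\max(\log x_i,\,-100) + (1-y_i)\,\max(\log(1-x_i),\,-100) \bigr) \]
\[ \mathrm{dx}_i = \mathrm{dout}_i \cdot \frac{x_i - y_i}{\max\bigl(x_i\,(1-x_i),\,10^{-12}\bigr)} \]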
0b2e525e28bf63535a971462000b29171deca63e.cu
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <algorithm> #include "cub/cub.cuh" #include "paddle/fluid/operators/bce_loss_op.h" #include "paddle/fluid/operators/math.h" #include "paddle/fluid/platform/cuda_primitives.h" #include "paddle/fluid/platform/gpu_launch_config.h" #include "paddle/fluid/platform/hostdevice.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void GPUBCELossForward(const T* x_data, const T* label_data, T* out_data, const int in_numel) { CUDA_KERNEL_LOOP(i, in_numel) { T x = x_data[i]; T label = label_data[i]; T one = static_cast<T>(1.); T neg_100 = static_cast<T>(-100.); T term1 = max(real_log(x), neg_100); T term2 = max(real_log(one - x), neg_100); out_data[i] = ((label - one) * term2) - (label * term1); } } template <typename T> __global__ void GPUBCELossBackward(const T* x_data, const T* label_data, const T* dout_data, T* dx_data, const int in_numel) { CUDA_KERNEL_LOOP(i, in_numel) { T x = x_data[i]; T label = label_data[i]; T dout = dout_data[i]; T one = static_cast<T>(1.); T eps = static_cast<T>(1e-12); T term1 = max((one - x) * x, eps); dx_data[i] = dout * (x - label) / term1; } } template <typename DeviceContext, typename T> class BCELossCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* labels = ctx.Input<Tensor>("Label"); auto* out = ctx.Output<Tensor>("Out"); auto x_data = x->data<T>(); auto out_data = out->mutable_data<T>(ctx.GetPlace()); auto x_numel = x->numel(); platform::GpuLaunchConfig config = platform::getGpuLaunchConfig(x_numel, ctx); Tensor x_cpu; framework::TensorCopy(*x, platform::CPUPlace(), &x_cpu); T* x_cpu_data = x_cpu.data<T>(); for (int64_t i = 0; i < x_numel; ++i) { PADDLE_ENFORCE_GE( x_cpu_data[i], static_cast<T>(0), platform::errors::InvalidArgument( "Illegal input, input must be greater than or equal to 0")); PADDLE_ENFORCE_LE( x_cpu_data[i], static_cast<T>(1), platform::errors::InvalidArgument( "Illegal input, input must be less than or equal to 1")); } auto& dev_ctx = ctx.cuda_device_context(); GPUBCELossForward< T><<<config.blocks, config.threads, 0, dev_ctx.stream()>>>( x_data, labels->data<T>(), out_data, x_numel); } }; template <typename DeviceContext, typename T> class BCELossGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* x = ctx.Input<Tensor>("X"); auto* labels = ctx.Input<Tensor>("Label"); auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out")); auto* dx = ctx.Output<Tensor>(framework::GradVarName("X")); auto dx_data = dx->mutable_data<T>(ctx.GetPlace()); int x_numel = x->numel(); platform::GpuLaunchConfig config = platform::getGpuLaunchConfig(x_numel, ctx); auto& dev_ctx = ctx.cuda_device_context(); GPUBCELossBackward< T><<<config.blocks, config.threads, 0, dev_ctx.stream()>>>( x->data<T>(), labels->data<T>(), dout->data<T>(), 
dx_data, x_numel); } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( bce_loss, ops::BCELossCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::BCELossCUDAKernel<paddle::platform::CUDADeviceContext, double>); REGISTER_OP_CUDA_KERNEL( bce_loss_grad, ops::BCELossGradCUDAKernel<paddle::platform::CUDADeviceContext, float>, ops::BCELossGradCUDAKernel<paddle::platform::CUDADeviceContext, double>);
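The substantive difference between this .cu file and its .hip counterpart above is the kernel-launch syntax rewritten by hipify; schematically (argument names abbreviated, the real calls pass labels->data<T>() and x_numel):

// CUDA (.cu) launch form
GPUBCELossForward<T><<<config.blocks, config.threads, 0, dev_ctx.stream()>>>(
    x_data, label_data, out_data, numel);

// HIP (.hip) launch form, as emitted by hipify
hipLaunchKernelGGL((GPUBCELossForward<T>), dim3(config.blocks), dim3(config.threads), 0,
                   dev_ctx.stream(), x_data, label_data, out_data, numel);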
53e2774dbeea96b4476f59b329afb70e0cd8960b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <cstring> // needed for memset #include <typeinfo> #include <tune_quda.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <jitify_helper.cuh> #include <kernels/multi_blas_core.cuh> namespace quda { namespace blas { hipStream_t* getStream(); template <int writeX, int writeY, int writeZ, int writeW> struct write { static constexpr int X = writeX; static constexpr int Y = writeY; static constexpr int Z = writeZ; static constexpr int W = writeW; }; namespace detail { template <unsigned... digits> struct to_chars { static const char value[]; }; template <unsigned... digits> const char to_chars<digits...>::value[] = {('0' + digits)..., 0}; template <unsigned rem, unsigned... digits> struct explode : explode<rem / 10, rem % 10, digits...> { }; template <unsigned... digits> struct explode<0, digits...> : to_chars<digits...> { }; } // namespace detail template <unsigned num> struct num_to_string : detail::explode<num / 10, num % 10> { }; template <int NXZ, typename FloatN, int M, typename SpinorX, typename SpinorY, typename SpinorZ, typename SpinorW, typename Functor, typename T> class MultiBlas : public TunableVectorY { private: const int NYW; const int nParity; mutable MultiBlasArg<NXZ, SpinorX, SpinorY, SpinorZ, SpinorW, Functor> arg; const coeff_array<T> &a, &b, &c; std::vector<ColorSpinorField *> &x, &y, &z, &w; // host pointers used for backing up fields when tuning // don't curry into the Spinors to minimize parameter size char *Y_h[MAX_MULTI_BLAS_N], *W_h[MAX_MULTI_BLAS_N], *Ynorm_h[MAX_MULTI_BLAS_N], *Wnorm_h[MAX_MULTI_BLAS_N]; bool tuneSharedBytes() const { return false; } public: MultiBlas(SpinorX X[], SpinorY Y[], SpinorZ Z[], SpinorW W[], Functor &f, const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, std::vector<ColorSpinorField *> &x, std::vector<ColorSpinorField *> &y, std::vector<ColorSpinorField *> &z, std::vector<ColorSpinorField *> &w, int NYW, int length) : TunableVectorY(NYW), NYW(NYW), nParity(x[0]->SiteSubset()), arg(X, Y, Z, W, f, NYW, length / nParity), a(a), b(b), c(c), x(x), y(y), z(z), w(w), Y_h(), W_h(), Ynorm_h(), Wnorm_h() { Amatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(a.data)); Bmatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(b.data)); Cmatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(c.data)); strcpy(aux, x[0]->AuxString()); if (x[0]->Precision() != y[0]->Precision()) { strcat(aux, ","); strcat(aux, y[0]->AuxString()); } #ifdef JITIFY ::quda::create_jitify_program("kernels/multi_blas_core.cuh"); #endif } virtual ~MultiBlas() {} inline TuneKey tuneKey() const { char name[TuneKey::name_n]; strcpy(name, num_to_string<NXZ>::value); strcat(name, std::to_string(NYW).c_str()); strcat(name, typeid(arg.f).name()); return TuneKey(x[0]->VolString(), name, aux); } inline void apply(const hipStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); typedef typename scalar<FloatN>::type Float; typedef typename vector<Float, 2>::type Float2; #ifdef JITIFY using namespace jitify::reflection; auto instance = program->kernel("quda::blas::multiBlasKernel").instantiate(Type<FloatN>(), M, NXZ, Type<decltype(arg)>()); // FIXME - if NXZ=1 no need to copy entire array // FIXME - do we really need strided access here? 
if (a.data && a.use_const) { Float2 A[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) A[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(a.data[NYW * i + j])); auto Amatrix_d = instance.get_constant_ptr("quda::blas::Amatrix_d"); cuMemcpyHtoDAsync(Amatrix_d, A, MAX_MATRIX_SIZE, *getStream()); } if (b.data && b.use_const) { Float2 B[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) B[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(b.data[NYW * i + j])); auto Bmatrix_d = instance.get_constant_ptr("quda::blas::Bmatrix_d"); cuMemcpyHtoDAsync(Bmatrix_d, B, MAX_MATRIX_SIZE, *getStream()); } if (c.data && c.use_const) { Float2 C[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) C[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(c.data[NYW * i + j])); auto Cmatrix_d = instance.get_constant_ptr("quda::blas::Cmatrix_d"); cuMemcpyHtoDAsync(Cmatrix_d, C, MAX_MATRIX_SIZE, *getStream()); } jitify_error = instance.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else // FIXME - if NXZ=1 no need to copy entire array // FIXME - do we really need strided access here? if (a.data && a.use_const) { Float2 A[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) A[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(a.data[NYW * i + j])); hipMemcpyToSymbolAsync(Amatrix_d, A, MAX_MATRIX_SIZE, 0, hipMemcpyHostToDevice, *getStream()); } if (b.data && b.use_const) { Float2 B[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) B[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(b.data[NYW * i + j])); hipMemcpyToSymbolAsync(Bmatrix_d, B, MAX_MATRIX_SIZE, 0, hipMemcpyHostToDevice, *getStream()); } if (c.data && c.use_const) { Float2 C[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) C[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(c.data[NYW * i + j])); hipMemcpyToSymbolAsync(Cmatrix_d, C, MAX_MATRIX_SIZE, 0, hipMemcpyHostToDevice, *getStream()); } #if TORCH_HIP_VERSION < 9000 hipMemcpyToSymbolAsync(arg_buffer, reinterpret_cast<char *>(&arg), sizeof(arg), 0, hipMemcpyHostToDevice, *getStream()); #endif hipLaunchKernelGGL(( multiBlasKernel<FloatN, M, NXZ>), dim3(tp.grid), dim3(tp.block), tp.shared_bytes, stream, arg); #endif } void preTune() { for (int i = 0; i < NYW; ++i) { arg.Y[i].backup(&Y_h[i], &Ynorm_h[i], y[i]->Bytes(), y[i]->NormBytes()); arg.W[i].backup(&W_h[i], &Wnorm_h[i], w[i]->Bytes(), w[i]->NormBytes()); } } void postTune() { for (int i = 0; i < NYW; ++i) { arg.Y[i].restore(&Y_h[i], &Ynorm_h[i], y[i]->Bytes(), y[i]->NormBytes()); arg.W[i].restore(&W_h[i], 
&Wnorm_h[i], w[i]->Bytes(), w[i]->NormBytes()); } } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.grid.z = nParity; } void defaultTuneParam(TuneParam &param) const { TunableVectorY::defaultTuneParam(param); param.grid.z = nParity; } long long flops() const { return arg.f.flops() * vec_length<FloatN>::value * (long)arg.length * nParity * M; } long long bytes() const { // the factor two here assumes we are reading and writing to the high precision vector return ((arg.f.streams() - 2) * x[0]->Bytes() + 2 * y[0]->Bytes()); } int tuningIter() const { return 3; } }; template <int NXZ, typename RegType, typename StoreType, typename yType, int M, template <int, typename, typename> class Functor, typename write, typename T> void multiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, std::vector<ColorSpinorField *> &x, std::vector<ColorSpinorField *> &y, std::vector<ColorSpinorField *> &z, std::vector<ColorSpinorField *> &w, int length) { const int NYW = y.size(); const int N = NXZ > NYW ? NXZ : NYW; if (N > MAX_MULTI_BLAS_N) errorQuda("Spinor vector length exceeds max size (%d > %d)", N, MAX_MULTI_BLAS_N); if (NXZ * NYW * sizeof(Complex) > MAX_MATRIX_SIZE) errorQuda("A matrix exceeds max size (%lu > %d)", NXZ * NYW * sizeof(Complex), MAX_MATRIX_SIZE); typedef typename scalar<RegType>::type Float; typedef typename vector<Float, 2>::type Float2; typedef vector<Float, 2> vec2; SpinorTexture<RegType, StoreType, M> X[NXZ]; Spinor<RegType, yType, M, write::Y> Y[MAX_MULTI_BLAS_N]; SpinorTexture<RegType, StoreType, M> Z[NXZ]; Spinor<RegType, StoreType, M, write::W> W[MAX_MULTI_BLAS_N]; for (int i = 0; i < NXZ; i++) { X[i].set(*dynamic_cast<cudaColorSpinorField *>(x[i])); Z[i].set(*dynamic_cast<cudaColorSpinorField *>(z[i])); } for (int i = 0; i < NYW; i++) { Y[i].set(*dynamic_cast<cudaColorSpinorField *>(y[i])); W[i].set(*dynamic_cast<cudaColorSpinorField *>(w[i])); } // if block caxpy is an 'outer product of caxpy' where 'x' Functor<NXZ, Float2, RegType> f(a, b, c, NYW); MultiBlas<NXZ, RegType, M, SpinorTexture<RegType, StoreType, M>, Spinor<RegType, yType, M, write::Y>, SpinorTexture<RegType, StoreType, M>, Spinor<RegType, StoreType, M, write::W>, decltype(f), T> blas(X, Y, Z, W, f, a, b, c, x, y, z, w, NYW, length); blas.apply(*getStream()); blas::bytes += blas.bytes(); blas::flops += blas.flops(); checkCudaError(); } /** Driver for generic blas routine with four loads and two store. 
*/ template <int NXZ, template <int MXZ, typename Float, typename FloatN> class Functor, typename write, typename T> void multiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, CompositeColorSpinorField &x, CompositeColorSpinorField &y, CompositeColorSpinorField &z, CompositeColorSpinorField &w) { if (checkLocation(*x[0], *y[0], *z[0], *w[0]) == QUDA_CUDA_FIELD_LOCATION) { if (y[0]->Precision() == QUDA_DOUBLE_PRECISION && x[0]->Precision() == QUDA_DOUBLE_PRECISION) { #if QUDA_PRECISION & 8 #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 1; multiBlas<NXZ, double2, double2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (2 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_SINGLE_PRECISION && x[0]->Precision() == QUDA_SINGLE_PRECISION) { #if QUDA_PRECISION & 4 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 1; multiBlas<NXZ, float4, float4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (4 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 1; multiBlas<NXZ, float2, float2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (2 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_HALF_PRECISION && x[0]->Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (x[0]->Ncolor() != 3) { errorQuda("nColor = %d is not supported", x[0]->Ncolor()); } if (x[0]->Nspin() == 4) { // wilson #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, short4, short4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { // staggered #ifdef GPU_STAGGERED_DIRAC const int M = 3; multiBlas<NXZ, float2, short2, short2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_QUARTER_PRECISION && x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 if (x[0]->Ncolor() != 3) { errorQuda("nColor = %d is not supported", x[0]->Ncolor()); } if (x[0]->Nspin() == 4) { // wilson #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, char4, char4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { // staggered #ifdef GPU_STAGGERED_DIRAC const int M = 3; multiBlas<NXZ, float2, char2, char2, M, Functor, write>(a, b, c, x, y, z, w, 
x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d not supported\n", x[0]->Precision()); } } else { // fields on the cpu errorQuda("Not implemented"); } } /** Driver for generic blas routine with four loads and two store. */ template <int NXZ, template <int MXZ, typename Float, typename FloatN> class Functor, typename write, typename T> void mixedMultiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, CompositeColorSpinorField &x, CompositeColorSpinorField &y, CompositeColorSpinorField &z, CompositeColorSpinorField &w) { if (checkLocation(*x[0], *y[0], *z[0], *w[0]) == QUDA_CUDA_FIELD_LOCATION) { if (y[0]->Precision() == QUDA_DOUBLE_PRECISION) { #if QUDA_PRECISION & 8 if (x[0]->Precision() == QUDA_SINGLE_PRECISION) { #if QUDA_PRECISION & 4 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, float4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, float2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, short4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, short2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, char4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, char2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else { errorQuda("Not implemented for this precision combination %d %d", x[0]->Precision(), y[0]->Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_SINGLE_PRECISION) { #if (QUDA_PRECISION & 4) if (x[0]->Precision() == 
QUDA_HALF_PRECISION) { #if (QUDA_PRECISION & 2) if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, short4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, float2, short2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if (QUDA_PRECISION & 1) if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, char4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, float2, char2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d y=%d not supported\n", x[0]->Precision(), y[0]->Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d y=%d not supported\n", x[0]->Precision(), y[0]->Precision()); } } else { // fields on the cpu errorQuda("Not implemented"); } } void caxpy_recurse(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, int i_idx ,int j_idx, int upper) { if (y.size() > MAX_MULTI_BLAS_N) // if greater than max single-kernel size, recurse. { // We need to split up 'a' carefully since it's row-major. Complex* tmpmajor = new Complex[x.size()*y.size()]; Complex* tmpmajor0 = &tmpmajor[0]; Complex* tmpmajor1 = &tmpmajor[x.size()*(y.size()/2)]; std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2); std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end()); const unsigned int xlen = x.size(); const unsigned int ylen0 = y.size()/2; const unsigned int ylen1 = y.size() - y.size()/2; int count = 0, count0 = 0, count1 = 0; for (unsigned int i = 0; i < xlen; i++) { for (unsigned int j = 0; j < ylen0; j++) tmpmajor0[count0++] = a_[count++]; for (unsigned int j = 0; j < ylen1; j++) tmpmajor1[count1++] = a_[count++]; } caxpy_recurse(tmpmajor0, x, y0, i_idx, 2*j_idx+0, upper); caxpy_recurse(tmpmajor1, x, y1, i_idx, 2*j_idx+1, upper); delete[] tmpmajor; } else { // if at the bottom of recursion, // return if on lower left for upper triangular, // return if on upper right for lower triangular. 
if (x.size() <= MAX_MULTI_BLAS_N) { if (upper == 1 && j_idx < i_idx) { return; } if (upper == -1 && j_idx > i_idx) { return; } } // mark true since we will copy the "a" matrix into constant memory coeff_array<Complex> a(a_, true), b, c; if (x[0]->Precision() == y[0]->Precision()) { switch (x.size()) { case 1: multiBlas<1, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpy_recurse(a0, x0, y, 2*i_idx+0, j_idx, upper); caxpy_recurse(a1, x1, y, 2*i_idx+1, j_idx, upper); break; } } else // precisions don't agree. 
{ switch (x.size()) { case 1: mixedMultiBlas<1, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpy_recurse(a0, x0, y, 2*i_idx+0, j_idx, upper); caxpy_recurse(a1, x1, y, 2*i_idx+1, j_idx, upper); break; } } } // end if (y.size() > MAX_MULTI_BLAS_N) } void caxpy(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. false specifies the matrix is unstructured. caxpy_recurse(a_, x, y, 0, 0, 0); } void caxpy_U(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. 1 indicates the matrix is upper-triangular, // which lets us skip some tiles. if (x.size() != y.size()) { errorQuda("An optimal block caxpy_U with non-square 'a' has not yet been implemented. Use block caxpy instead.\n"); return; } caxpy_recurse(a_, x, y, 0, 0, 1); } void caxpy_L(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. -1 indicates the matrix is lower-triangular // which lets us skip some tiles. if (x.size() != y.size()) { errorQuda("An optimal block caxpy_L with non-square 'a' has not yet been implemented. 
Use block caxpy instead.\n"); return; } caxpy_recurse(a_, x, y, 0, 0, -1); } void caxpy(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy(a, x.Components(), y.Components()); } void caxpy_U(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy_U(a, x.Components(), y.Components()); } void caxpy_L(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy_L(a, x.Components(), y.Components()); } void caxpyz_recurse(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z, int i, int j, int pass, int upper) { if (y.size() > MAX_MULTI_BLAS_N) // if greater than max single-kernel size, recurse. { // We need to split up 'a' carefully since it's row-major. Complex* tmpmajor = new Complex[x.size()*y.size()]; Complex* tmpmajor0 = &tmpmajor[0]; Complex* tmpmajor1 = &tmpmajor[x.size()*(y.size()/2)]; std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2); std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end()); std::vector<ColorSpinorField*> z0(z.begin(), z.begin() + z.size()/2); std::vector<ColorSpinorField*> z1(z.begin() + z.size()/2, z.end()); const unsigned int xlen = x.size(); const unsigned int ylen0 = y.size()/2; const unsigned int ylen1 = y.size() - y.size()/2; int count = 0, count0 = 0, count1 = 0; for (unsigned int i_ = 0; i_ < xlen; i_++) { for (unsigned int j = 0; j < ylen0; j++) tmpmajor0[count0++] = a_[count++]; for (unsigned int j = 0; j < ylen1; j++) tmpmajor1[count1++] = a_[count++]; } caxpyz_recurse(tmpmajor0, x, y0, z0, i, 2*j+0, pass, upper); caxpyz_recurse(tmpmajor1, x, y1, z1, i, 2*j+1, pass, upper); delete[] tmpmajor; } else { // if at bottom of recursion check where we are if (x.size() <= MAX_MULTI_BLAS_N) { if (pass==1) { if (i!=j) { if (upper == 1 && j < i) { return; } // upper right, don't need to update lower left. if (upper == -1 && i < j) { return; } // lower left, don't need to update upper right. caxpy(a_, x, z); return; // off diagonal } return; } else { if (i!=j) return; // We're on the first pass, so we only want to update the diagonal. 
} } // mark true since we will copy the "a" matrix into constant memory coeff_array<Complex> a(a_, true), b, c; if (x[0]->Precision() == y[0]->Precision()) { switch (x.size()) { case 1: multiBlas<1, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpyz_recurse(a0, x0, y, z, 2*i+0, j, pass, upper); caxpyz_recurse(a1, x1, y, z, 2*i+1, j, pass, upper); // b/c we don't want to re-zero z. break; } } else // precisions don't agree. 
{ switch (x.size()) { case 1: mixedMultiBlas<1, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpyz_recurse(a0, x0, y, z, 2*i+0, j, pass, upper); caxpyz_recurse(a1, x1, y, z, 2*i+1, j, pass, upper); break; } } } // end if (y.size() > MAX_MULTI_BLAS_N) } void caxpyz(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, 0); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, 0); } void caxpyz_U(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // a is upper triangular. // first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, 1); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, 1); } void caxpyz_L(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // a is upper triangular. 
// first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, -1); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, -1); } void caxpyz(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz(a, x.Components(), y.Components(), z.Components()); } void caxpyz_U(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz_U(a, x.Components(), y.Components(), z.Components()); } void caxpyz_L(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz_L(a, x.Components(), y.Components(), z.Components()); } void axpyBzpcx(const double *a_, std::vector<ColorSpinorField*> &x_, std::vector<ColorSpinorField*> &y_, const double *b_, ColorSpinorField &z_, const double *c_) { if (y_.size() <= MAX_MULTI_BLAS_N) { // swizzle order since we are writing to x_ and y_, but the // multi-blas only allow writing to y and w, and moreover the // block width of y and w must match, and x and z must match. std::vector<ColorSpinorField*> &y = y_; std::vector<ColorSpinorField*> &w = x_; // wrap a container around the third solo vector std::vector<ColorSpinorField*> x; x.push_back(&z_); // we will curry the parameter arrays into the functor coeff_array<double> a(a_,false), b(b_,false), c(c_,false); if (x[0]->Precision() != y[0]->Precision() ) { mixedMultiBlas<1, multi_axpyBzpcx_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); } else { multiBlas<1, multi_axpyBzpcx_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); } } else { // split the problem in half and recurse const double *a0 = &a_[0]; const double *b0 = &b_[0]; const double *c0 = &c_[0]; std::vector<ColorSpinorField*> x0(x_.begin(), x_.begin() + x_.size()/2); std::vector<ColorSpinorField*> y0(y_.begin(), y_.begin() + y_.size()/2); axpyBzpcx(a0, x0, y0, b0, z_, c0); const double *a1 = &a_[y_.size()/2]; const double *b1 = &b_[y_.size()/2]; const double *c1 = &c_[y_.size()/2]; std::vector<ColorSpinorField*> x1(x_.begin() + x_.size()/2, x_.end()); std::vector<ColorSpinorField*> y1(y_.begin() + y_.size()/2, y_.end()); axpyBzpcx(a1, x1, y1, b1, z_, c1); } } void caxpyBxpz(const Complex *a_, std::vector<ColorSpinorField*> &x_, ColorSpinorField &y_, const Complex *b_, ColorSpinorField &z_) { const int xsize = x_.size(); if (xsize <= MAX_MULTI_BLAS_N) // only swizzle if we have to. { // swizzle order since we are writing to y_ and z_, but the // multi-blas only allow writing to y and w, and moreover the // block width of y and w must match, and x and z must match. // Also, wrap a container around them. 
std::vector<ColorSpinorField*> y; y.push_back(&y_); std::vector<ColorSpinorField*> w; w.push_back(&z_); // we're reading from x std::vector<ColorSpinorField*> &x = x_; // put a and b into constant space coeff_array<Complex> a(a_,true), b(b_,true), c; if (x[0]->Precision() != y[0]->Precision() ) { switch(xsize) { case 1: mixedMultiBlas<1, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // we can't hit the default, it ends up in the else below. 
break; } } else { switch(xsize) { case 1: multiBlas<1, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // we can't hit the default, it ends up in the else below. break; } } } else { // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *b0 = &b_[0]; std::vector<ColorSpinorField*> x0(x_.begin(), x_.begin() + x_.size()/2); caxpyBxpz(a0, x0, y_, b0, z_); const Complex *a1 = &a_[x_.size()/2]; const Complex *b1 = &b_[x_.size()/2]; std::vector<ColorSpinorField*> x1(x_.begin() + x_.size()/2, x_.end()); caxpyBxpz(a1, x1, y_, b1, z_); } } } // namespace blas } // namespace quda
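When caxpy_recurse above splits the y vectors in half, it also repacks the row-major coefficient matrix into two column blocks before recursing. A minimal standalone illustration of that repacking is sketched below; the function name is hypothetical and std::complex stands in for quda::Complex.

#include <complex>
#include <vector>

using Complex = std::complex<double>;

// Split an xlen x ylen row-major matrix a into two row-major blocks holding the
// first ylen/2 columns and the remaining columns, mirroring the copy loop in caxpy_recurse.
void split_columns(const Complex* a, unsigned xlen, unsigned ylen,
                   std::vector<Complex>& a0, std::vector<Complex>& a1)
{
  unsigned ylen0 = ylen / 2, ylen1 = ylen - ylen0;
  a0.resize((size_t)xlen * ylen0);
  a1.resize((size_t)xlen * ylen1);
  unsigned count = 0, count0 = 0, count1 = 0;
  for (unsigned i = 0; i < xlen; i++) {
    for (unsigned j = 0; j < ylen0; j++) a0[count0++] = a[count++];
    for (unsigned j = 0; j < ylen1; j++) a1[count1++] = a[count++];
  }
}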
53e2774dbeea96b4476f59b329afb70e0cd8960b.cu
#include <stdlib.h> #include <stdio.h> #include <cstring> // needed for memset #include <typeinfo> #include <tune_quda.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <jitify_helper.cuh> #include <kernels/multi_blas_core.cuh> namespace quda { namespace blas { cudaStream_t* getStream(); template <int writeX, int writeY, int writeZ, int writeW> struct write { static constexpr int X = writeX; static constexpr int Y = writeY; static constexpr int Z = writeZ; static constexpr int W = writeW; }; namespace detail { template <unsigned... digits> struct to_chars { static const char value[]; }; template <unsigned... digits> const char to_chars<digits...>::value[] = {('0' + digits)..., 0}; template <unsigned rem, unsigned... digits> struct explode : explode<rem / 10, rem % 10, digits...> { }; template <unsigned... digits> struct explode<0, digits...> : to_chars<digits...> { }; } // namespace detail template <unsigned num> struct num_to_string : detail::explode<num / 10, num % 10> { }; template <int NXZ, typename FloatN, int M, typename SpinorX, typename SpinorY, typename SpinorZ, typename SpinorW, typename Functor, typename T> class MultiBlas : public TunableVectorY { private: const int NYW; const int nParity; mutable MultiBlasArg<NXZ, SpinorX, SpinorY, SpinorZ, SpinorW, Functor> arg; const coeff_array<T> &a, &b, &c; std::vector<ColorSpinorField *> &x, &y, &z, &w; // host pointers used for backing up fields when tuning // don't curry into the Spinors to minimize parameter size char *Y_h[MAX_MULTI_BLAS_N], *W_h[MAX_MULTI_BLAS_N], *Ynorm_h[MAX_MULTI_BLAS_N], *Wnorm_h[MAX_MULTI_BLAS_N]; bool tuneSharedBytes() const { return false; } public: MultiBlas(SpinorX X[], SpinorY Y[], SpinorZ Z[], SpinorW W[], Functor &f, const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, std::vector<ColorSpinorField *> &x, std::vector<ColorSpinorField *> &y, std::vector<ColorSpinorField *> &z, std::vector<ColorSpinorField *> &w, int NYW, int length) : TunableVectorY(NYW), NYW(NYW), nParity(x[0]->SiteSubset()), arg(X, Y, Z, W, f, NYW, length / nParity), a(a), b(b), c(c), x(x), y(y), z(z), w(w), Y_h(), W_h(), Ynorm_h(), Wnorm_h() { Amatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(a.data)); Bmatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(b.data)); Cmatrix_h = reinterpret_cast<signed char *>(const_cast<T *>(c.data)); strcpy(aux, x[0]->AuxString()); if (x[0]->Precision() != y[0]->Precision()) { strcat(aux, ","); strcat(aux, y[0]->AuxString()); } #ifdef JITIFY ::quda::create_jitify_program("kernels/multi_blas_core.cuh"); #endif } virtual ~MultiBlas() {} inline TuneKey tuneKey() const { char name[TuneKey::name_n]; strcpy(name, num_to_string<NXZ>::value); strcat(name, std::to_string(NYW).c_str()); strcat(name, typeid(arg.f).name()); return TuneKey(x[0]->VolString(), name, aux); } inline void apply(const cudaStream_t &stream) { TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity()); typedef typename scalar<FloatN>::type Float; typedef typename vector<Float, 2>::type Float2; #ifdef JITIFY using namespace jitify::reflection; auto instance = program->kernel("quda::blas::multiBlasKernel").instantiate(Type<FloatN>(), M, NXZ, Type<decltype(arg)>()); // FIXME - if NXZ=1 no need to copy entire array // FIXME - do we really need strided access here? 
if (a.data && a.use_const) { Float2 A[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) A[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(a.data[NYW * i + j])); auto Amatrix_d = instance.get_constant_ptr("quda::blas::Amatrix_d"); cuMemcpyHtoDAsync(Amatrix_d, A, MAX_MATRIX_SIZE, *getStream()); } if (b.data && b.use_const) { Float2 B[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) B[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(b.data[NYW * i + j])); auto Bmatrix_d = instance.get_constant_ptr("quda::blas::Bmatrix_d"); cuMemcpyHtoDAsync(Bmatrix_d, B, MAX_MATRIX_SIZE, *getStream()); } if (c.data && c.use_const) { Float2 C[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) C[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(c.data[NYW * i + j])); auto Cmatrix_d = instance.get_constant_ptr("quda::blas::Cmatrix_d"); cuMemcpyHtoDAsync(Cmatrix_d, C, MAX_MATRIX_SIZE, *getStream()); } jitify_error = instance.configure(tp.grid, tp.block, tp.shared_bytes, stream).launch(arg); #else // FIXME - if NXZ=1 no need to copy entire array // FIXME - do we really need strided access here? if (a.data && a.use_const) { Float2 A[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) A[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(a.data[NYW * i + j])); cudaMemcpyToSymbolAsync(Amatrix_d, A, MAX_MATRIX_SIZE, 0, cudaMemcpyHostToDevice, *getStream()); } if (b.data && b.use_const) { Float2 B[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) B[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(b.data[NYW * i + j])); cudaMemcpyToSymbolAsync(Bmatrix_d, B, MAX_MATRIX_SIZE, 0, cudaMemcpyHostToDevice, *getStream()); } if (c.data && c.use_const) { Float2 C[MAX_MATRIX_SIZE / sizeof(Float2)]; // since the kernel doesn't know the width of them matrix at compile // time we stride it and copy the padded matrix to GPU for (int i = 0; i < NXZ; i++) for (int j = 0; j < NYW; j++) C[MAX_MULTI_BLAS_N * i + j] = make_Float2<Float2>(Complex(c.data[NYW * i + j])); cudaMemcpyToSymbolAsync(Cmatrix_d, C, MAX_MATRIX_SIZE, 0, cudaMemcpyHostToDevice, *getStream()); } #if CUDA_VERSION < 9000 cudaMemcpyToSymbolAsync(arg_buffer, reinterpret_cast<char *>(&arg), sizeof(arg), 0, cudaMemcpyHostToDevice, *getStream()); #endif multiBlasKernel<FloatN, M, NXZ><<<tp.grid, tp.block, tp.shared_bytes, stream>>>(arg); #endif } void preTune() { for (int i = 0; i < NYW; ++i) { arg.Y[i].backup(&Y_h[i], &Ynorm_h[i], y[i]->Bytes(), y[i]->NormBytes()); arg.W[i].backup(&W_h[i], &Wnorm_h[i], w[i]->Bytes(), w[i]->NormBytes()); } } void postTune() { for (int i = 0; i < NYW; ++i) { arg.Y[i].restore(&Y_h[i], &Ynorm_h[i], y[i]->Bytes(), y[i]->NormBytes()); arg.W[i].restore(&W_h[i], &Wnorm_h[i], w[i]->Bytes(), 
w[i]->NormBytes()); } } void initTuneParam(TuneParam &param) const { TunableVectorY::initTuneParam(param); param.grid.z = nParity; } void defaultTuneParam(TuneParam &param) const { TunableVectorY::defaultTuneParam(param); param.grid.z = nParity; } long long flops() const { return arg.f.flops() * vec_length<FloatN>::value * (long)arg.length * nParity * M; } long long bytes() const { // the factor two here assumes we are reading and writing to the high precision vector return ((arg.f.streams() - 2) * x[0]->Bytes() + 2 * y[0]->Bytes()); } int tuningIter() const { return 3; } }; template <int NXZ, typename RegType, typename StoreType, typename yType, int M, template <int, typename, typename> class Functor, typename write, typename T> void multiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, std::vector<ColorSpinorField *> &x, std::vector<ColorSpinorField *> &y, std::vector<ColorSpinorField *> &z, std::vector<ColorSpinorField *> &w, int length) { const int NYW = y.size(); const int N = NXZ > NYW ? NXZ : NYW; if (N > MAX_MULTI_BLAS_N) errorQuda("Spinor vector length exceeds max size (%d > %d)", N, MAX_MULTI_BLAS_N); if (NXZ * NYW * sizeof(Complex) > MAX_MATRIX_SIZE) errorQuda("A matrix exceeds max size (%lu > %d)", NXZ * NYW * sizeof(Complex), MAX_MATRIX_SIZE); typedef typename scalar<RegType>::type Float; typedef typename vector<Float, 2>::type Float2; typedef vector<Float, 2> vec2; SpinorTexture<RegType, StoreType, M> X[NXZ]; Spinor<RegType, yType, M, write::Y> Y[MAX_MULTI_BLAS_N]; SpinorTexture<RegType, StoreType, M> Z[NXZ]; Spinor<RegType, StoreType, M, write::W> W[MAX_MULTI_BLAS_N]; for (int i = 0; i < NXZ; i++) { X[i].set(*dynamic_cast<cudaColorSpinorField *>(x[i])); Z[i].set(*dynamic_cast<cudaColorSpinorField *>(z[i])); } for (int i = 0; i < NYW; i++) { Y[i].set(*dynamic_cast<cudaColorSpinorField *>(y[i])); W[i].set(*dynamic_cast<cudaColorSpinorField *>(w[i])); } // if block caxpy is an 'outer product of caxpy' where 'x' Functor<NXZ, Float2, RegType> f(a, b, c, NYW); MultiBlas<NXZ, RegType, M, SpinorTexture<RegType, StoreType, M>, Spinor<RegType, yType, M, write::Y>, SpinorTexture<RegType, StoreType, M>, Spinor<RegType, StoreType, M, write::W>, decltype(f), T> blas(X, Y, Z, W, f, a, b, c, x, y, z, w, NYW, length); blas.apply(*getStream()); blas::bytes += blas.bytes(); blas::flops += blas.flops(); checkCudaError(); } /** Driver for generic blas routine with four loads and two store. 
*/ template <int NXZ, template <int MXZ, typename Float, typename FloatN> class Functor, typename write, typename T> void multiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, CompositeColorSpinorField &x, CompositeColorSpinorField &y, CompositeColorSpinorField &z, CompositeColorSpinorField &w) { if (checkLocation(*x[0], *y[0], *z[0], *w[0]) == QUDA_CUDA_FIELD_LOCATION) { if (y[0]->Precision() == QUDA_DOUBLE_PRECISION && x[0]->Precision() == QUDA_DOUBLE_PRECISION) { #if QUDA_PRECISION & 8 #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 1; multiBlas<NXZ, double2, double2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (2 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_SINGLE_PRECISION && x[0]->Precision() == QUDA_SINGLE_PRECISION) { #if QUDA_PRECISION & 4 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 1; multiBlas<NXZ, float4, float4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (4 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 1; multiBlas<NXZ, float2, float2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Length() / (2 * M)); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_HALF_PRECISION && x[0]->Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (x[0]->Ncolor() != 3) { errorQuda("nColor = %d is not supported", x[0]->Ncolor()); } if (x[0]->Nspin() == 4) { // wilson #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, short4, short4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { // staggered #ifdef GPU_STAGGERED_DIRAC const int M = 3; multiBlas<NXZ, float2, short2, short2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_QUARTER_PRECISION && x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 if (x[0]->Ncolor() != 3) { errorQuda("nColor = %d is not supported", x[0]->Ncolor()); } if (x[0]->Nspin() == 4) { // wilson #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, char4, char4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { // staggered #ifdef GPU_STAGGERED_DIRAC const int M = 3; multiBlas<NXZ, float2, char2, char2, M, Functor, write>(a, b, c, x, y, z, w, 
x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d not supported\n", x[0]->Precision()); } } else { // fields on the cpu errorQuda("Not implemented"); } } /** Driver for generic blas routine with four loads and two store. */ template <int NXZ, template <int MXZ, typename Float, typename FloatN> class Functor, typename write, typename T> void mixedMultiBlas(const coeff_array<T> &a, const coeff_array<T> &b, const coeff_array<T> &c, CompositeColorSpinorField &x, CompositeColorSpinorField &y, CompositeColorSpinorField &z, CompositeColorSpinorField &w) { if (checkLocation(*x[0], *y[0], *z[0], *w[0]) == QUDA_CUDA_FIELD_LOCATION) { if (y[0]->Precision() == QUDA_DOUBLE_PRECISION) { #if QUDA_PRECISION & 8 if (x[0]->Precision() == QUDA_SINGLE_PRECISION) { #if QUDA_PRECISION & 4 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, float4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, float2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_HALF_PRECISION) { #if QUDA_PRECISION & 2 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, short4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, short2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if QUDA_PRECISION & 1 if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 12; multiBlas<NXZ, double2, char4, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 1) { #if defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, double2, char2, double2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, x[0]->Precision()); #endif } else { errorQuda("Not implemented for this precision combination %d %d", x[0]->Precision(), y[0]->Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else if (y[0]->Precision() == QUDA_SINGLE_PRECISION) { #if (QUDA_PRECISION & 4) if (x[0]->Precision() == 
QUDA_HALF_PRECISION) { #if (QUDA_PRECISION & 2) if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, short4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, float2, short2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else if (x[0]->Precision() == QUDA_QUARTER_PRECISION) { #if (QUDA_PRECISION & 1) if (x[0]->Nspin() == 4) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) const int M = 6; multiBlas<NXZ, float4, char4, float4, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else if (x[0]->Nspin() == 2 || x[0]->Nspin() == 1) { #if defined(GPU_WILSON_DIRAC) || defined(GPU_DOMAIN_WALL_DIRAC) || defined(GPU_STAGGERED_DIRAC) const int M = 3; multiBlas<NXZ, float2, char2, float2, M, Functor, write>(a, b, c, x, y, z, w, x[0]->Volume()); #else errorQuda("blas has not been built for Nspin=%d fields", x[0]->Nspin()); #endif } else { errorQuda("nSpin=%d is not supported\n", x[0]->Nspin()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d y=%d not supported\n", x[0]->Precision(), y[0]->Precision()); } #else errorQuda("QUDA_PRECISION=%d does not enable precision %d", QUDA_PRECISION, y[0]->Precision()); #endif } else { errorQuda("Precision combination x=%d y=%d not supported\n", x[0]->Precision(), y[0]->Precision()); } } else { // fields on the cpu errorQuda("Not implemented"); } } void caxpy_recurse(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, int i_idx ,int j_idx, int upper) { if (y.size() > MAX_MULTI_BLAS_N) // if greater than max single-kernel size, recurse. { // We need to split up 'a' carefully since it's row-major. Complex* tmpmajor = new Complex[x.size()*y.size()]; Complex* tmpmajor0 = &tmpmajor[0]; Complex* tmpmajor1 = &tmpmajor[x.size()*(y.size()/2)]; std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2); std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end()); const unsigned int xlen = x.size(); const unsigned int ylen0 = y.size()/2; const unsigned int ylen1 = y.size() - y.size()/2; int count = 0, count0 = 0, count1 = 0; for (unsigned int i = 0; i < xlen; i++) { for (unsigned int j = 0; j < ylen0; j++) tmpmajor0[count0++] = a_[count++]; for (unsigned int j = 0; j < ylen1; j++) tmpmajor1[count1++] = a_[count++]; } caxpy_recurse(tmpmajor0, x, y0, i_idx, 2*j_idx+0, upper); caxpy_recurse(tmpmajor1, x, y1, i_idx, 2*j_idx+1, upper); delete[] tmpmajor; } else { // if at the bottom of recursion, // return if on lower left for upper triangular, // return if on upper right for lower triangular. 
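        // i_idx/j_idx are the tile coordinates of this sub-block of 'a': j_idx doubles
        // every time the y direction is halved and i_idx every time the x direction is
        // halved.  For an upper-triangular 'a' (upper == 1) tiles strictly below the
        // diagonal (j_idx < i_idx) can be skipped; for a lower-triangular 'a'
        // (upper == -1) tiles strictly above the diagonal (j_idx > i_idx) can be skipped.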
if (x.size() <= MAX_MULTI_BLAS_N) { if (upper == 1 && j_idx < i_idx) { return; } if (upper == -1 && j_idx > i_idx) { return; } } // mark true since we will copy the "a" matrix into constant memory coeff_array<Complex> a(a_, true), b, c; if (x[0]->Precision() == y[0]->Precision()) { switch (x.size()) { case 1: multiBlas<1, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpy_recurse(a0, x0, y, 2*i_idx+0, j_idx, upper); caxpy_recurse(a1, x1, y, 2*i_idx+1, j_idx, upper); break; } } else // precisions don't agree. 
{ switch (x.size()) { case 1: mixedMultiBlas<1, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multicaxpy_, write<0, 1, 0, 0>>(a, b, c, x, y, x, y); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpy_recurse(a0, x0, y, 2*i_idx+0, j_idx, upper); caxpy_recurse(a1, x1, y, 2*i_idx+1, j_idx, upper); break; } } } // end if (y.size() > MAX_MULTI_BLAS_N) } void caxpy(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. false specifies the matrix is unstructured. caxpy_recurse(a_, x, y, 0, 0, 0); } void caxpy_U(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. 1 indicates the matrix is upper-triangular, // which lets us skip some tiles. if (x.size() != y.size()) { errorQuda("An optimal block caxpy_U with non-square 'a' has not yet been implemented. Use block caxpy instead.\n"); return; } caxpy_recurse(a_, x, y, 0, 0, 1); } void caxpy_L(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y) { // Enter a recursion. // Pass a, x, y. (0,0) indexes the tiles. -1 indicates the matrix is lower-triangular // which lets us skip some tiles. if (x.size() != y.size()) { errorQuda("An optimal block caxpy_L with non-square 'a' has not yet been implemented. 
Use block caxpy instead.\n"); return; } caxpy_recurse(a_, x, y, 0, 0, -1); } void caxpy(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy(a, x.Components(), y.Components()); } void caxpy_U(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy_U(a, x.Components(), y.Components()); } void caxpy_L(const Complex *a, ColorSpinorField &x, ColorSpinorField &y) { caxpy_L(a, x.Components(), y.Components()); } void caxpyz_recurse(const Complex *a_, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z, int i, int j, int pass, int upper) { if (y.size() > MAX_MULTI_BLAS_N) // if greater than max single-kernel size, recurse. { // We need to split up 'a' carefully since it's row-major. Complex* tmpmajor = new Complex[x.size()*y.size()]; Complex* tmpmajor0 = &tmpmajor[0]; Complex* tmpmajor1 = &tmpmajor[x.size()*(y.size()/2)]; std::vector<ColorSpinorField*> y0(y.begin(), y.begin() + y.size()/2); std::vector<ColorSpinorField*> y1(y.begin() + y.size()/2, y.end()); std::vector<ColorSpinorField*> z0(z.begin(), z.begin() + z.size()/2); std::vector<ColorSpinorField*> z1(z.begin() + z.size()/2, z.end()); const unsigned int xlen = x.size(); const unsigned int ylen0 = y.size()/2; const unsigned int ylen1 = y.size() - y.size()/2; int count = 0, count0 = 0, count1 = 0; for (unsigned int i_ = 0; i_ < xlen; i_++) { for (unsigned int j = 0; j < ylen0; j++) tmpmajor0[count0++] = a_[count++]; for (unsigned int j = 0; j < ylen1; j++) tmpmajor1[count1++] = a_[count++]; } caxpyz_recurse(tmpmajor0, x, y0, z0, i, 2*j+0, pass, upper); caxpyz_recurse(tmpmajor1, x, y1, z1, i, 2*j+1, pass, upper); delete[] tmpmajor; } else { // if at bottom of recursion check where we are if (x.size() <= MAX_MULTI_BLAS_N) { if (pass==1) { if (i!=j) { if (upper == 1 && j < i) { return; } // upper right, don't need to update lower left. if (upper == -1 && i < j) { return; } // lower left, don't need to update upper right. caxpy(a_, x, z); return; // off diagonal } return; } else { if (i!=j) return; // We're on the first pass, so we only want to update the diagonal. 
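          // (So pass 0 applies the full caxpyz only on the diagonal tiles, while the
          //  pass == 1 branch above sweeps every off-diagonal tile with a plain caxpy.)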
} } // mark true since we will copy the "a" matrix into constant memory coeff_array<Complex> a(a_, true), b, c; if (x[0]->Precision() == y[0]->Precision()) { switch (x.size()) { case 1: multiBlas<1, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpyz_recurse(a0, x0, y, z, 2*i+0, j, pass, upper); caxpyz_recurse(a1, x1, y, z, 2*i+1, j, pass, upper); // b/c we don't want to re-zero z. break; } } else // precisions don't agree. 
{ switch (x.size()) { case 1: mixedMultiBlas<1, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multicaxpyz_, write<0, 0, 0, 1>>(a, b, c, x, y, x, z); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *a1 = &a_[(x.size()/2)*y.size()]; std::vector<ColorSpinorField*> x0(x.begin(), x.begin() + x.size()/2); std::vector<ColorSpinorField*> x1(x.begin() + x.size()/2, x.end()); caxpyz_recurse(a0, x0, y, z, 2*i+0, j, pass, upper); caxpyz_recurse(a1, x1, y, z, 2*i+1, j, pass, upper); break; } } } // end if (y.size() > MAX_MULTI_BLAS_N) } void caxpyz(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, 0); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, 0); } void caxpyz_U(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // a is upper triangular. // first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, 1); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, 1); } void caxpyz_L(const Complex *a, std::vector<ColorSpinorField*> &x, std::vector<ColorSpinorField*> &y, std::vector<ColorSpinorField*> &z) { // a is upper triangular. 
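    // Note: despite the comment above, in caxpyz_L the matrix 'a' is lower triangular;
    // the trailing -1 passed to caxpyz_recurse below is what selects the
    // lower-triangular (skip upper-right tiles) path.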
// first pass does the caxpyz on the diagonal caxpyz_recurse(a, x, y, z, 0, 0, 0, -1); // second pass does caxpy on the off diagonals caxpyz_recurse(a, x, y, z, 0, 0, 1, -1); } void caxpyz(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz(a, x.Components(), y.Components(), z.Components()); } void caxpyz_U(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz_U(a, x.Components(), y.Components(), z.Components()); } void caxpyz_L(const Complex *a, ColorSpinorField &x, ColorSpinorField &y, ColorSpinorField &z) { caxpyz_L(a, x.Components(), y.Components(), z.Components()); } void axpyBzpcx(const double *a_, std::vector<ColorSpinorField*> &x_, std::vector<ColorSpinorField*> &y_, const double *b_, ColorSpinorField &z_, const double *c_) { if (y_.size() <= MAX_MULTI_BLAS_N) { // swizzle order since we are writing to x_ and y_, but the // multi-blas only allow writing to y and w, and moreover the // block width of y and w must match, and x and z must match. std::vector<ColorSpinorField*> &y = y_; std::vector<ColorSpinorField*> &w = x_; // wrap a container around the third solo vector std::vector<ColorSpinorField*> x; x.push_back(&z_); // we will curry the parameter arrays into the functor coeff_array<double> a(a_,false), b(b_,false), c(c_,false); if (x[0]->Precision() != y[0]->Precision() ) { mixedMultiBlas<1, multi_axpyBzpcx_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); } else { multiBlas<1, multi_axpyBzpcx_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); } } else { // split the problem in half and recurse const double *a0 = &a_[0]; const double *b0 = &b_[0]; const double *c0 = &c_[0]; std::vector<ColorSpinorField*> x0(x_.begin(), x_.begin() + x_.size()/2); std::vector<ColorSpinorField*> y0(y_.begin(), y_.begin() + y_.size()/2); axpyBzpcx(a0, x0, y0, b0, z_, c0); const double *a1 = &a_[y_.size()/2]; const double *b1 = &b_[y_.size()/2]; const double *c1 = &c_[y_.size()/2]; std::vector<ColorSpinorField*> x1(x_.begin() + x_.size()/2, x_.end()); std::vector<ColorSpinorField*> y1(y_.begin() + y_.size()/2, y_.end()); axpyBzpcx(a1, x1, y1, b1, z_, c1); } } void caxpyBxpz(const Complex *a_, std::vector<ColorSpinorField*> &x_, ColorSpinorField &y_, const Complex *b_, ColorSpinorField &z_) { const int xsize = x_.size(); if (xsize <= MAX_MULTI_BLAS_N) // only swizzle if we have to. { // swizzle order since we are writing to y_ and z_, but the // multi-blas only allow writing to y and w, and moreover the // block width of y and w must match, and x and z must match. // Also, wrap a container around them. 
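      // Concretely: the writable outputs y_ and z_ are wrapped as the multi-blas y and
      // w slots below, while the read-only x_ fields are passed as both x and z.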
std::vector<ColorSpinorField*> y; y.push_back(&y_); std::vector<ColorSpinorField*> w; w.push_back(&z_); // we're reading from x std::vector<ColorSpinorField*> &x = x_; // put a and b into constant space coeff_array<Complex> a(a_,true), b(b_,true), c; if (x[0]->Precision() != y[0]->Precision() ) { switch(xsize) { case 1: mixedMultiBlas<1, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 2 case 2: mixedMultiBlas<2, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 3 case 3: mixedMultiBlas<3, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 4 case 4: mixedMultiBlas<4, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 5 case 5: mixedMultiBlas<5, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 6 case 6: mixedMultiBlas<6, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 7 case 7: mixedMultiBlas<7, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 8 case 8: mixedMultiBlas<8, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 9 case 9: mixedMultiBlas<9, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 10 case 10: mixedMultiBlas<10, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 11 case 11: mixedMultiBlas<11, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 12 case 12: mixedMultiBlas<12, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 13 case 13: mixedMultiBlas<13, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 14 case 14: mixedMultiBlas<14, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 15 case 15: mixedMultiBlas<15, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 16 case 16: mixedMultiBlas<16, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // we can't hit the default, it ends up in the else below. 
break; } } else { switch(xsize) { case 1: multiBlas<1, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 2 case 2: multiBlas<2, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 3 case 3: multiBlas<3, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 4 case 4: multiBlas<4, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 5 case 5: multiBlas<5, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 6 case 6: multiBlas<6, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 7 case 7: multiBlas<7, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 8 case 8: multiBlas<8, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 9 case 9: multiBlas<9, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 10 case 10: multiBlas<10, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 11 case 11: multiBlas<11, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 12 case 12: multiBlas<12, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 13 case 13: multiBlas<13, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 14 case 14: multiBlas<14, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 15 case 15: multiBlas<15, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #if MAX_MULTI_BLAS_N >= 16 case 16: multiBlas<16, multi_caxpyBxpz_, write<0, 1, 0, 1>>(a, b, c, x, y, x, w); break; #endif // 16 #endif // 15 #endif // 14 #endif // 13 #endif // 12 #endif // 11 #endif // 10 #endif // 9 #endif // 8 #endif // 7 #endif // 6 #endif // 5 #endif // 4 #endif // 3 #endif // 2 default: // we can't hit the default, it ends up in the else below. break; } } } else { // split the problem in half and recurse const Complex *a0 = &a_[0]; const Complex *b0 = &b_[0]; std::vector<ColorSpinorField*> x0(x_.begin(), x_.begin() + x_.size()/2); caxpyBxpz(a0, x0, y_, b0, z_); const Complex *a1 = &a_[x_.size()/2]; const Complex *b1 = &b_[x_.size()/2]; std::vector<ColorSpinorField*> x1(x_.begin() + x_.size()/2, x_.end()); caxpyBxpz(a1, x1, y_, b1, z_); } } } // namespace blas } // namespace quda
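// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the QUDA sources above): caxpy_recurse and
// caxpyz_recurse split the row-major x.size() x y.size() coefficient matrix 'a'
// column-wise before recursing on each half of y.  The standalone program below
// reproduces just that repacking loop with plain doubles so the index
// bookkeeping is easy to verify; the helper name split_columns is an
// illustrative choice, not a QUDA API.
#include <cstdio>
#include <vector>

// Split a row-major (rows x cols) matrix into two row-major column blocks,
// mirroring the tmpmajor0/tmpmajor1 construction in caxpy_recurse.
static void split_columns(const std::vector<double> &a, int rows, int cols,
                          std::vector<double> &left, std::vector<double> &right)
{
  const int cols0 = cols / 2;     // columns handed to the first half of y
  const int cols1 = cols - cols0; // columns handed to the second half of y
  left.resize(rows * cols0);
  right.resize(rows * cols1);
  int count = 0, count0 = 0, count1 = 0;
  for (int i = 0; i < rows; i++) { // walk each row of the original matrix once
    for (int j = 0; j < cols0; j++) left[count0++] = a[count++];
    for (int j = 0; j < cols1; j++) right[count1++] = a[count++];
  }
}

int main()
{
  const int rows = 2, cols = 5; // e.g. 2 x-vectors and 5 y-vectors
  std::vector<double> a(rows * cols);
  for (int k = 0; k < rows * cols; k++) a[k] = k; // a(i,j) = i*cols + j

  std::vector<double> left, right;
  split_columns(a, rows, cols, left, right);

  // Each half is again row-major over its own column range, which is exactly
  // the layout the recursive call expects.
  printf("left : "); for (double v : left) printf("%g ", v); printf("\n");
  printf("right: "); for (double v : right) printf("%g ", v); printf("\n");
  return 0;
}
// ---------------------------------------------------------------------------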
ae8df8874550797c472b5e284610d99510acf511.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2009 NVIDIA Corporation. All rights reserved. NOTICE TO LICENSEE: This source code and/or documentation ("Licensed Deliverables") are subject to NVIDIA intellectual property rights under U.S. and international Copyright laws. These Licensed Deliverables contained herein is PROPRIETARY and CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions of a form of NVIDIA software license agreement by and between NVIDIA and Licensee ("License Agreement") or electronically accepted by Licensee. Notwithstanding any terms or conditions to the contrary in the License Agreement, reproduction or disclosure of the Licensed Deliverables to any third party without the express written consent of NVIDIA is prohibited. NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THESE LICENSED DELIVERABLES. U.S. Government End Users. These Licensed Deliverables are a "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of "commercial computer software" and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government only as a commercial end item. Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the Licensed Deliverables with only those rights set forth herein. Any use of the Licensed Deliverables in individual and commercial software must include, in the user documentation and internal comments to the code, the above Disclaimer and U.S. Government End Users Notice. */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a hipDeviceSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. 
Sample code * is the easiest way to demonstrate: * #include "cuPrintf.hip" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "cuPrintf_hip.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. // NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. 
Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. 
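// Record layout reminder: each CUPRINTF_MAX_LEN slot holds a cuPrintfHeader,
// then the length-prefixed arguments, then the format string; the header (with
// its magic number) is written last so the host side only consumes records
// that have been completely written.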
// __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((long)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. // __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. 
This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? (int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); 
CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. // We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. 
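    // Each argument was stored by copyArg() as an aligned length word followed by
    // the value itself, so reading the length here is what lets the float/double
    // cases below pick the correct host-side type (4 vs. 8 bytes).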
int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. // static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. hipMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, hipMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) hipMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" hipError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < CUPRINTF_MAX_LEN) ? 
CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(hipMalloc((void **)&printfbuf_device, printfbuf_len) != hipSuccess) return hipErrorInitializationError; hipMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; hipMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. hipMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); hipMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return hipSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; hipFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. // // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" hipError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return hipErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; hipMemcpy(&magic, printfbuf_device, sizeof(unsigned short), hipMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; hipMemcpy(&hdr, blockptr, sizeof(hdr), hipMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! 
} blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; hipMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) hipMemset(printfbuf_device, 0, printfbuf_len); return hipSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
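// Editor's note: the record-slot arithmetic used above (fixed CUPRINTF_MAX_LEN-byte records handed
// out via atomicAdd and wrapped back into the buffer with a modulo, plus the buffer-length rounding
// at the top of cudaPrintfInit) is easy to check on the host. The following is a minimal host-only
// sketch of that arithmetic, not part of the library; the constant value 256 and the rounding rule
// are taken from the file, while the helper name round_up_to_records is hypothetical.
#include <cstdio>
#include <cstddef>

// Same constant as in cuPrintf: each printf record occupies exactly this many bytes.
static const size_t CUPRINTF_MAX_LEN_DEMO = 256;

// Round a requested buffer length up to a whole number of records,
// mirroring the fix-up at the top of cudaPrintfInit().
static size_t round_up_to_records(size_t bufferLen)
{
    if (bufferLen < CUPRINTF_MAX_LEN_DEMO)
        bufferLen = CUPRINTF_MAX_LEN_DEMO;
    if (bufferLen % CUPRINTF_MAX_LEN_DEMO)
        bufferLen += CUPRINTF_MAX_LEN_DEMO - (bufferLen % CUPRINTF_MAX_LEN_DEMO);
    return bufferLen;
}

int main()
{
    size_t len = round_up_to_records(1000);    // 1000 bytes -> 1024 bytes (4 records)
    printf("padded buffer length: %zu\n", len);

    // getNextPrintfBufPtr() hands out monotonically increasing offsets via atomicAdd and wraps
    // them into the circular buffer with a modulo; simulate a few consecutive grabs here.
    size_t running_offset = 0;                 // stands in for printfBufferPtr - globalPrintfBuffer
    for (int i = 0; i < 6; ++i) {
        size_t slot = running_offset % len;    // physical byte offset of this record
        printf("grab %d -> slot at byte %zu\n", i, slot);
        running_offset += CUPRINTF_MAX_LEN_DEMO;
    }
    return 0;
}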
ae8df8874550797c472b5e284610d99510acf511.cu
/* Copyright 2009 NVIDIA Corporation. All rights reserved. NOTICE TO LICENSEE: This source code and/or documentation ("Licensed Deliverables") are subject to NVIDIA intellectual property rights under U.S. and international Copyright laws. These Licensed Deliverables contained herein is PROPRIETARY and CONFIDENTIAL to NVIDIA and is being provided under the terms and conditions of a form of NVIDIA software license agreement by and between NVIDIA and Licensee ("License Agreement") or electronically accepted by Licensee. Notwithstanding any terms or conditions to the contrary in the License Agreement, reproduction or disclosure of the Licensed Deliverables to any third party without the express written consent of NVIDIA is prohibited. NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THESE LICENSED DELIVERABLES. U.S. Government End Users. These Licensed Deliverables are a "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of "commercial computer software" and "commercial computer software documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government only as a commercial end item. Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the Licensed Deliverables with only those rights set forth herein. Any use of the Licensed Deliverables in individual and commercial software must include, in the user documentation and internal comments to the code, the above Disclaimer and U.S. Government End Users Notice. */ /* * cuPrintf.cu * * This is a printf command callable from within a kernel. It is set * up so that output is sent to a memory buffer, which is emptied from * the host side - but only after a cudaThreadSynchronize() on the host. * * Currently, there is a limitation of around 200 characters of output * and no more than 10 arguments to a single cuPrintf() call. Issue * multiple calls if longer format strings are required. * * It requires minimal setup, and is *NOT* optimised for performance. * For example, writes are not coalesced - this is because there is an * assumption that people will not want to printf from every single one * of thousands of threads, but only from individual threads at a time. * * Using this is simple - it requires one host-side call to initialise * everything, and then kernels can call cuPrintf at will. 
Sample code * is the easiest way to demonstrate: * #include "cuPrintf.cu" __global__ void testKernel(int val) { cuPrintf("Value is: %d\n", val); } int main() { cudaPrintfInit(); testKernel<<< 2, 3 >>>(10); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); return 0; } * * See the header file, "cuPrintf.cuh" for more info, especially * arguments to cudaPrintfInit() and cudaPrintfDisplay(); */ #ifndef CUPRINTF_CU #define CUPRINTF_CU #include "cuPrintf.cuh" #if __CUDA_ARCH__ > 100 // Atomics only used with > sm_10 architecture #include <sm_11_atomic_functions.h> #endif // This is the smallest amount of memory, per-thread, which is allowed. // It is also the largest amount of space a single printf() can take up const static int CUPRINTF_MAX_LEN = 256; // This structure is used internally to track block/thread output restrictions. typedef struct __align__(8) { int threadid; // CUPRINTF_UNRESTRICTED for unrestricted int blockid; // CUPRINTF_UNRESTRICTED for unrestricted } cuPrintfRestriction; // The main storage is in a global print buffer, which has a known // start/end/length. These are atomically updated so it works as a // circular buffer. // Since the only control primitive that can be used is atomicAdd(), // we cannot wrap the pointer as such. The actual address must be // calculated from printfBufferPtr by mod-ing with printfBufferLength. // For sm_10 architecture, we must subdivide the buffer per-thread // since we do not even have an atomic primitive. __constant__ static char *globalPrintfBuffer = NULL; // Start of circular buffer (set up by host) __constant__ static int printfBufferLength = 0; // Size of circular buffer (set up by host) __device__ static cuPrintfRestriction restrictRules; // Output restrictions __device__ volatile static char *printfBufferPtr = NULL; // Current atomically-incremented non-wrapped offset // This is the header preceeding all printf entries. // NOTE: It *must* be size-aligned to the maximum entity size (size_t) typedef struct __align__(8) { unsigned short magic; // Magic number says we're valid unsigned short fmtoffset; // Offset of fmt string into buffer unsigned short blockid; // Block ID of author unsigned short threadid; // Thread ID of author } cuPrintfHeader; // Special header for sm_10 architecture #define CUPRINTF_SM10_MAGIC 0xC810 // Not a valid ascii character typedef struct __align__(16) { unsigned short magic; // sm_10 specific magic number unsigned short unused; unsigned int thread_index; // thread ID for this buffer unsigned int thread_buf_len; // per-thread buffer length unsigned int offset; // most recent printf's offset } cuPrintfHeaderSM10; // Because we can't write an element which is not aligned to its bit-size, // we have to align all sizes and variables on maximum-size boundaries. // That means sizeof(double) in this case, but we'll use (long long) for // better arch<1.3 support #define CUPRINTF_ALIGN_SIZE sizeof(long long) // All our headers are prefixed with a magic number so we know they're ready #define CUPRINTF_SM11_MAGIC (unsigned short)0xC811 // Not a valid ascii character // // getNextPrintfBufPtr // // Grabs a block of space in the general circular buffer, using an // atomic function to ensure that it's ours. We handle wrapping // around the circular buffer and return a pointer to a place which // can be written to. // // Important notes: // 1. We always grab CUPRINTF_MAX_LEN bytes // 2. Because of 1, we never worry about wrapping around the end // 3. 
Because of 1, printfBufferLength *must* be a factor of CUPRINTF_MAX_LEN // // This returns a pointer to the place where we own. // __device__ static char *getNextPrintfBufPtr() { // Initialisation check if(!printfBufferPtr) return NULL; // Thread/block restriction check if((restrictRules.blockid != CUPRINTF_UNRESTRICTED) && (restrictRules.blockid != (blockIdx.x + gridDim.x*blockIdx.y))) return NULL; if((restrictRules.threadid != CUPRINTF_UNRESTRICTED) && (restrictRules.threadid != (threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z))) return NULL; // Conditional section, dependent on architecture #if __CUDA_ARCH__ == 100 // For sm_10 architectures, we have no atomic add - this means we must split the // entire available buffer into per-thread blocks. Inefficient, but what can you do. int thread_count = (gridDim.x * gridDim.y) * (blockDim.x * blockDim.y * blockDim.z); int thread_index = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z + (blockIdx.x + gridDim.x*blockIdx.y) * (blockDim.x * blockDim.y * blockDim.z); // Find our own block of data and go to it. Make sure the per-thread length // is a precise multiple of CUPRINTF_MAX_LEN, otherwise we risk size and // alignment issues! We must round down, of course. unsigned int thread_buf_len = printfBufferLength / thread_count; thread_buf_len &= ~(CUPRINTF_MAX_LEN-1); // We *must* have a thread buffer length able to fit at least two printfs (one header, one real) if(thread_buf_len < (CUPRINTF_MAX_LEN * 2)) return NULL; // Now address our section of the buffer. The first item is a header. char *myPrintfBuffer = globalPrintfBuffer + (thread_buf_len * thread_index); cuPrintfHeaderSM10 hdr = *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer; if(hdr.magic != CUPRINTF_SM10_MAGIC) { // If our header is not set up, initialise it hdr.magic = CUPRINTF_SM10_MAGIC; hdr.thread_index = thread_index; hdr.thread_buf_len = thread_buf_len; hdr.offset = 0; // Note we start at 0! We pre-increment below. *(cuPrintfHeaderSM10 *)(void *)myPrintfBuffer = hdr; // Write back the header // For initial setup purposes, we might need to init thread0's header too // (so that cudaPrintfDisplay() below will work). This is only run once. cuPrintfHeaderSM10 *tophdr = (cuPrintfHeaderSM10 *)(void *)globalPrintfBuffer; tophdr->thread_buf_len = thread_buf_len; } // Adjust the offset by the right amount, and wrap it if need be unsigned int offset = hdr.offset + CUPRINTF_MAX_LEN; if(offset >= hdr.thread_buf_len) offset = CUPRINTF_MAX_LEN; // Write back the new offset for next time and return a pointer to it ((cuPrintfHeaderSM10 *)(void *)myPrintfBuffer)->offset = offset; return myPrintfBuffer + offset; #else // Much easier with an atomic operation! size_t offset = atomicAdd((unsigned int *)&printfBufferPtr, CUPRINTF_MAX_LEN) - (size_t)globalPrintfBuffer; offset %= printfBufferLength; return globalPrintfBuffer + offset; #endif } // // writePrintfHeader // // Inserts the header for containing our UID, fmt position and // block/thread number. We generate it dynamically to avoid // issues arising from requiring pre-initialisation. 
// __device__ static void writePrintfHeader(char *ptr, char *fmtptr) { if(ptr) { cuPrintfHeader header; header.magic = CUPRINTF_SM11_MAGIC; header.fmtoffset = (unsigned short)(fmtptr - ptr); header.blockid = blockIdx.x + gridDim.x*blockIdx.y; header.threadid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z; *(cuPrintfHeader *)(void *)ptr = header; } } // // cuPrintfStrncpy // // This special strncpy outputs an aligned length value, followed by the // string. It then zero-pads the rest of the string until a 64-aligned // boundary. The length *includes* the padding. A pointer to the byte // just after the \0 is returned. // // This function could overflow CUPRINTF_MAX_LEN characters in our buffer. // To avoid it, we must count as we output and truncate where necessary. // __device__ static char *cuPrintfStrncpy(char *dest, const char *src, int n, char *end) { // Initialisation and overflow check if(!dest || !src || (dest >= end)) return NULL; // Prepare to write the length specifier. We're guaranteed to have // at least "CUPRINTF_ALIGN_SIZE" bytes left because we only write out in // chunks that size, and CUPRINTF_MAX_LEN is aligned with CUPRINTF_ALIGN_SIZE. int *lenptr = (int *)(void *)dest; int len = 0; dest += CUPRINTF_ALIGN_SIZE; // Now copy the string while(n--) { if(dest >= end) // Overflow check break; len++; *dest++ = *src; if(*src++ == '\0') break; } // Now write out the padding bytes, and we have our length. while((dest < end) && (((long)dest & (CUPRINTF_ALIGN_SIZE-1)) != 0)) { len++; *dest++ = 0; } *lenptr = len; return (dest < end) ? dest : NULL; // Overflow means return NULL } // // copyArg // // This copies a length specifier and then the argument out to the // data buffer. Templates let the compiler figure all this out at // compile-time, making life much simpler from the programming // point of view. I'm assuimg all (const char *) is a string, and // everything else is the variable it points at. I'd love to see // a better way of doing it, but aside from parsing the format // string I can't think of one. // // The length of the data type is inserted at the beginning (so that // the display can distinguish between float and double), and the // pointer to the end of the entry is returned. // __device__ static char *copyArg(char *ptr, const char *arg, char *end) { // Initialisation check if(!ptr || !arg) return NULL; // strncpy does all our work. We just terminate. if((ptr = cuPrintfStrncpy(ptr, arg, CUPRINTF_MAX_LEN, end)) != NULL) *ptr = 0; return ptr; } template <typename T> __device__ static char *copyArg(char *ptr, T &arg, char *end) { // Initisalisation and overflow check. Alignment rules mean that // we're at least CUPRINTF_ALIGN_SIZE away from "end", so we only need // to check that one offset. if(!ptr || ((ptr+CUPRINTF_ALIGN_SIZE) >= end)) return NULL; // Write the length and argument *(int *)(void *)ptr = sizeof(arg); ptr += CUPRINTF_ALIGN_SIZE; *(T *)(void *)ptr = arg; ptr += CUPRINTF_ALIGN_SIZE; *ptr = 0; return ptr; } // // cuPrintf // // Templated printf functions to handle multiple arguments. // Note we return the total amount of data copied, not the number // of characters output. But then again, who ever looks at the // return from printf() anyway? // // The format is to grab a block of circular buffer space, the // start of which will hold a header and a pointer to the format // string. We then write in all the arguments, and finally the // format string itself. 
This is to make it easy to prevent // overflow of our buffer (we support up to 10 arguments, each of // which can be 12 bytes in length - that means that only the // format string (or a %s) can actually overflow; so the overflow // check need only be in the strcpy function. // // The header is written at the very last because that's what // makes it look like we're done. // // Errors, which are basically lack-of-initialisation, are ignored // in the called functions because NULL pointers are passed around // // All printf variants basically do the same thing, setting up the // buffer, writing all arguments, then finalising the header. For // clarity, we'll pack the code into some big macros. #define CUPRINTF_PREAMBLE \ char *start, *end, *bufptr, *fmtstart; \ if((start = getNextPrintfBufPtr()) == NULL) return 0; \ end = start + CUPRINTF_MAX_LEN; \ bufptr = start + sizeof(cuPrintfHeader); // Posting an argument is easy #define CUPRINTF_ARG(argname) \ bufptr = copyArg(bufptr, argname, end); // After args are done, record start-of-fmt and write the fmt and header #define CUPRINTF_POSTAMBLE \ fmtstart = bufptr; \ end = cuPrintfStrncpy(bufptr, fmt, CUPRINTF_MAX_LEN, end); \ writePrintfHeader(start, end ? fmtstart : NULL); \ return end ? (int)(end - start) : 0; __device__ int cuPrintf(const char *fmt) { CUPRINTF_PREAMBLE; CUPRINTF_POSTAMBLE; } template <typename T1> __device__ int cuPrintf(const char *fmt, T1 arg1) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); 
CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_POSTAMBLE; } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8, typename T9, typename T10> __device__ int cuPrintf(const char *fmt, T1 arg1, T2 arg2, T3 arg3, T4 arg4, T5 arg5, T6 arg6, T7 arg7, T8 arg8, T9 arg9, T10 arg10) { CUPRINTF_PREAMBLE; CUPRINTF_ARG(arg1); CUPRINTF_ARG(arg2); CUPRINTF_ARG(arg3); CUPRINTF_ARG(arg4); CUPRINTF_ARG(arg5); CUPRINTF_ARG(arg6); CUPRINTF_ARG(arg7); CUPRINTF_ARG(arg8); CUPRINTF_ARG(arg9); CUPRINTF_ARG(arg10); CUPRINTF_POSTAMBLE; } #undef CUPRINTF_PREAMBLE #undef CUPRINTF_ARG #undef CUPRINTF_POSTAMBLE // // cuPrintfRestrict // // Called to restrict output to a given thread/block. // We store the info in "restrictRules", which is set up at // init time by the host. It's not the cleanest way to do this // because it means restrictions will last between // invocations, but given the output-pointer continuity, // I feel this is reasonable. // __device__ void cuPrintfRestrict(int threadid, int blockid) { int thread_count = blockDim.x * blockDim.y * blockDim.z; if(((threadid < thread_count) && (threadid >= 0)) || (threadid == CUPRINTF_UNRESTRICTED)) restrictRules.threadid = threadid; int block_count = gridDim.x * gridDim.y; if(((blockid < block_count) && (blockid >= 0)) || (blockid == CUPRINTF_UNRESTRICTED)) restrictRules.blockid = blockid; } /////////////////////////////////////////////////////////////////////////////// // HOST SIDE #include <stdio.h> static FILE *printf_fp; static char *printfbuf_start=NULL; static char *printfbuf_device=NULL; static int printfbuf_len=0; // // outputPrintfData // // Our own internal function, which takes a pointer to a data buffer // and passes it through libc's printf for output. // // We receive the formate string and a pointer to where the data is // held. We then run through and print it out. // // Returns 0 on failure, 1 on success // static int outputPrintfData(char *fmt, char *data) { // Format string is prefixed by a length that we don't need fmt += CUPRINTF_ALIGN_SIZE; // Now run through it, printing everything we can. We must // run to every % character, extract only that, and use printf // to format it. char *p = strchr(fmt, '%'); while(p != NULL) { // Print up to the % character *p = '\0'; fputs(fmt, printf_fp); *p = '%'; // Put back the % // Now handle the format specifier char *format = p++; // Points to the '%' p += strcspn(p, "%cdiouxXeEfgGaAnps"); if(*p == '\0') // If no format specifier, print the whole thing { fmt = format; break; } // Cut out the format bit and use printf to print it. It's prefixed // by its length. 
int arglen = *(int *)data; if(arglen > CUPRINTF_MAX_LEN) { fputs("Corrupt printf buffer data - aborting\n", printf_fp); return 0; } data += CUPRINTF_ALIGN_SIZE; char specifier = *p++; char c = *p; // Store for later *p = '\0'; switch(specifier) { // These all take integer arguments case 'c': case 'd': case 'i': case 'o': case 'u': case 'x': case 'X': case 'p': fprintf(printf_fp, format, *((int *)data)); break; // These all take double arguments case 'e': case 'E': case 'f': case 'g': case 'G': case 'a': case 'A': if(arglen == 4) // Float vs. Double thing fprintf(printf_fp, format, *((float *)data)); else fprintf(printf_fp, format, *((double *)data)); break; // Strings are handled in a special way case 's': fprintf(printf_fp, format, (char *)data); break; // % is special case '%': fprintf(printf_fp, "%%"); break; // Everything else is just printed out as-is default: fprintf(printf_fp, format); break; } data += CUPRINTF_ALIGN_SIZE; // Move on to next argument *p = c; // Restore what we removed fmt = p; // Adjust fmt string to be past the specifier p = strchr(fmt, '%'); // and get the next specifier } // Print out the last of the string fputs(fmt, printf_fp); return 1; } // // doPrintfDisplay // // This runs through the blocks of CUPRINTF_MAX_LEN-sized data, calling the // print function above to display them. We've got this separate from // cudaPrintfDisplay() below so we can handle the SM_10 architecture // partitioning. // static int doPrintfDisplay(int headings, int clear, char *bufstart, char *bufend, char *bufptr, char *endptr) { // Grab, piece-by-piece, each output element until we catch // up with the circular buffer end pointer int printf_count=0; char printfbuf_local[CUPRINTF_MAX_LEN+1]; printfbuf_local[CUPRINTF_MAX_LEN] = '\0'; while(bufptr != endptr) { // Wrap ourselves at the end-of-buffer if(bufptr == bufend) bufptr = bufstart; // Adjust our start pointer to within the circular buffer and copy a block. cudaMemcpy(printfbuf_local, bufptr, CUPRINTF_MAX_LEN, cudaMemcpyDeviceToHost); // If the magic number isn't valid, then this write hasn't gone through // yet and we'll wait until it does (or we're past the end for non-async printfs). cuPrintfHeader *hdr = (cuPrintfHeader *)printfbuf_local; if((hdr->magic != CUPRINTF_SM11_MAGIC) || (hdr->fmtoffset >= CUPRINTF_MAX_LEN)) { //fprintf(printf_fp, "Bad magic number in printf header\n"); break; } // Extract all the info and get this printf done if(headings) fprintf(printf_fp, "[%d, %d]: ", hdr->blockid, hdr->threadid); if(hdr->fmtoffset == 0) fprintf(printf_fp, "printf buffer overflow\n"); else if(!outputPrintfData(printfbuf_local+hdr->fmtoffset, printfbuf_local+sizeof(cuPrintfHeader))) break; printf_count++; // Clear if asked if(clear) cudaMemset(bufptr, 0, CUPRINTF_MAX_LEN); // Now advance our start location, because we're done, and keep copying bufptr += CUPRINTF_MAX_LEN; } return printf_count; } // // cudaPrintfInit // // Takes a buffer length to allocate, creates the memory on the device and // returns a pointer to it for when a kernel is called. It's up to the caller // to free it. // extern "C" cudaError_t cudaPrintfInit(size_t bufferLen) { // Fix up bufferlen to be a multiple of CUPRINTF_MAX_LEN bufferLen = (bufferLen < CUPRINTF_MAX_LEN) ? 
CUPRINTF_MAX_LEN : bufferLen; if((bufferLen % CUPRINTF_MAX_LEN) > 0) bufferLen += (CUPRINTF_MAX_LEN - (bufferLen % CUPRINTF_MAX_LEN)); printfbuf_len = (int)bufferLen; // Allocate a print buffer on the device and zero it if(cudaMalloc((void **)&printfbuf_device, printfbuf_len) != cudaSuccess) return cudaErrorInitializationError; cudaMemset(printfbuf_device, 0, printfbuf_len); printfbuf_start = printfbuf_device; // Where we start reading from // No restrictions to begin with cuPrintfRestriction restrict; restrict.threadid = restrict.blockid = CUPRINTF_UNRESTRICTED; cudaMemcpyToSymbol(restrictRules, &restrict, sizeof(restrict)); // Initialise the buffer and the respective lengths/pointers. cudaMemcpyToSymbol(globalPrintfBuffer, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferPtr, &printfbuf_device, sizeof(char *)); cudaMemcpyToSymbol(printfBufferLength, &printfbuf_len, sizeof(printfbuf_len)); return cudaSuccess; } // // cudaPrintfEnd // // Frees up the memory which we allocated // extern "C" void cudaPrintfEnd() { if(!printfbuf_start || !printfbuf_device) return; cudaFree(printfbuf_device); printfbuf_start = printfbuf_device = NULL; } // // cudaPrintfDisplay // // Each call to this function dumps the entire current contents // of the printf buffer to the pre-specified FILE pointer. The // circular "start" pointer is advanced so that subsequent calls // dumps only new stuff. // // In the case of async memory access (via streams), call this // repeatedly to keep trying to empty the buffer. If it's a sync // access, then the whole buffer should empty in one go. // // Arguments: // outputFP - File descriptor to output to (NULL => stdout) // showThreadID - If true, prints [block,thread] before each line // extern "C" cudaError_t cudaPrintfDisplay(void *outputFP, bool showThreadID) { printf_fp = (FILE *)((outputFP == NULL) ? stdout : outputFP); // For now, we force "synchronous" mode which means we're not concurrent // with kernel execution. This also means we don't need clearOnPrint. // If you're patching it for async operation, here's where you want it. bool sync_printfs = true; bool clearOnPrint = false; // Initialisation check if(!printfbuf_start || !printfbuf_device || !printf_fp) return cudaErrorMissingConfiguration; // To determine which architecture we're using, we read the // first short from the buffer - it'll be the magic number // relating to the version. unsigned short magic; cudaMemcpy(&magic, printfbuf_device, sizeof(unsigned short), cudaMemcpyDeviceToHost); // For SM_10 architecture, we've split our buffer into one-per-thread. // That means we must do each thread block separately. It'll require // extra reading. We also, for now, don't support async printfs because // that requires tracking one start pointer per thread. if(magic == CUPRINTF_SM10_MAGIC) { sync_printfs = true; clearOnPrint = false; int blocklen = 0; char *blockptr = printfbuf_device; while(blockptr < (printfbuf_device + printfbuf_len)) { cuPrintfHeaderSM10 hdr; cudaMemcpy(&hdr, blockptr, sizeof(hdr), cudaMemcpyDeviceToHost); // We get our block-size-step from the very first header if(hdr.thread_buf_len != 0) blocklen = hdr.thread_buf_len; // No magic number means no printfs from this thread if(hdr.magic != CUPRINTF_SM10_MAGIC) { if(blocklen == 0) { fprintf(printf_fp, "No printf headers found at all!\n"); break; // No valid headers! 
} blockptr += blocklen; continue; } // "offset" is non-zero then we can print the block contents if(hdr.offset > 0) { // For synchronous printfs, we must print from endptr->bufend, then from start->end if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+hdr.offset+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len); doPrintfDisplay(showThreadID, clearOnPrint, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.thread_buf_len, blockptr+CUPRINTF_MAX_LEN, blockptr+hdr.offset+CUPRINTF_MAX_LEN); } // Move on to the next block and loop again blockptr += hdr.thread_buf_len; } } // For SM_11 and up, everything is a single buffer and it's simple else if(magic == CUPRINTF_SM11_MAGIC) { // Grab the current "end of circular buffer" pointer. char *printfbuf_end = NULL; cudaMemcpyFromSymbol(&printfbuf_end, printfBufferPtr, sizeof(char *)); // Adjust our starting and ending pointers to within the block char *bufptr = ((printfbuf_start - printfbuf_device) % printfbuf_len) + printfbuf_device; char *endptr = ((printfbuf_end - printfbuf_device) % printfbuf_len) + printfbuf_device; // For synchronous (i.e. after-kernel-exit) printf display, we have to handle circular // buffer wrap carefully because we could miss those past "end". if(sync_printfs) doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, endptr, printfbuf_device+printfbuf_len); doPrintfDisplay(showThreadID, clearOnPrint, printfbuf_device, printfbuf_device+printfbuf_len, bufptr, endptr); printfbuf_start = printfbuf_end; } else ;//printf("Bad magic number in cuPrintf buffer header\n"); // If we were synchronous, then we must ensure that the memory is cleared on exit // otherwise another kernel launch with a different grid size could conflict. if(sync_printfs) cudaMemset(printfbuf_device, 0, printfbuf_len); return cudaSuccess; } // Cleanup #undef CUPRINTF_MAX_LEN #undef CUPRINTF_ALIGN_SIZE #undef CUPRINTF_SM10_MAGIC #undef CUPRINTF_SM11_MAGIC #endif
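// Editor's note: for reference, the usage pattern described in this file's own header comment can
// be exercised with the short program below. It is essentially that sample reproduced as a
// self-contained sketch; the kernel name testKernel, the launch shape <<<2, 3>>> and the
// argument-free cudaPrintfInit() call all come from the sample, not from anything new.
#include <cstdio>
#include "cuPrintf.cu"   // device-side printf implementation from the file above

__global__ void testKernel(int val)
{
    // Each thread that passes any cuPrintfRestrict() rules writes one record into the buffer.
    cuPrintf("Value is: %d\n", val);
}

int main()
{
    cudaPrintfInit();                 // allocate the device-side circular buffer (default size)
    testKernel<<<2, 3>>>(10);         // 2 blocks x 3 threads -> 6 printf records
    cudaPrintfDisplay(stdout, true);  // drain the buffer on the host, prefixing [block, thread]
    cudaPrintfEnd();                  // free the buffer
    return 0;
}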
d8543b9f7d7ec463ca55c264e4742e673bbb1a63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void multKernel(float *a, float *b, float *ab, int width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; // allocate tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; // calculate the row & col index to identify element to work on int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; float result = 0; // loop over the tiles of the input in phases for(int p = 0; p < width/TILE_WIDTH; ++p) { // collaboratively load tiles into shared memory: row-wise and column wise respectively s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)]; s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col]; __syncthreads(); // dot product between row of s_a and col of s_b for(int k = 0; k < TILE_WIDTH; ++k) result += s_a[ty][k] * s_b[k][tx]; __syncthreads(); } ab[row*width+col] = result; }
d8543b9f7d7ec463ca55c264e4742e673bbb1a63.cu
#include "includes.h" __global__ void multKernel(float *a, float *b, float *ab, int width) { int tx = threadIdx.x, ty = threadIdx.y; int bx = blockIdx.x, by = blockIdx.y; // allocate tiles in __shared__ memory __shared__ float s_a[TILE_WIDTH][TILE_WIDTH]; __shared__ float s_b[TILE_WIDTH][TILE_WIDTH]; // calculate the row & col index to identify element to work on int row = by*blockDim.y + ty; int col = bx*blockDim.x + tx; float result = 0; // loop over the tiles of the input in phases for(int p = 0; p < width/TILE_WIDTH; ++p) { // collaboratively load tiles into shared memory: row-wise and column wise respectively s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)]; s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col]; __syncthreads(); // dot product between row of s_a and col of s_b for(int k = 0; k < TILE_WIDTH; ++k) result += s_a[ty][k] * s_b[k][tx]; __syncthreads(); } ab[row*width+col] = result; }
e24cc0b238611f7c3462698f0b0a10624dfad3aa.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include "ed25519.h" #include <inttypes.h> #include <assert.h> #include <vector> #include <pthread.h> #include "gpu_common.h" #include "gpu_ctx.h" #define USE_CLOCK_GETTIME #include "perftime.h" #define PACKET_SIZE 512 typedef struct { size_t size; uint64_t num_retransmits; uint16_t addr[8]; uint16_t port; bool v6; } streamer_Meta; typedef struct { uint8_t data[PACKET_SIZE]; streamer_Meta meta; } streamer_Packet; void print_dwords(unsigned char* ptr, int size) { for (int j = 0; j < (size)/(int)sizeof(uint32_t); j++) { LOG("%x ", ((uint32_t*)ptr)[j]); } } typedef struct { uint8_t signature[SIG_SIZE]; uint8_t public_key[PUB_KEY_SIZE]; uint32_t message_len; uint8_t message[8]; } packet_t; typedef struct { gpu_Elems* elems_h; uint32_t num_elems; uint32_t total_packets; uint32_t total_signatures; uint32_t* message_lens; uint32_t* public_key_offsets; uint32_t* signature_offsets; uint32_t* message_start_offsets; uint8_t* out_h; int num_iterations; uint8_t use_non_default_stream; } verify_cpu_ctx_t; static void* verify_proc(void* ctx) { verify_cpu_ctx_t* vctx = (verify_cpu_ctx_t*)ctx; LOG("Start iterations\n"); for (int i = 0; i < vctx->num_iterations; i++) { ed25519_verify_many(&vctx->elems_h[0], vctx->num_elems, sizeof(streamer_Packet), vctx->total_packets, vctx->total_signatures, vctx->message_lens, vctx->public_key_offsets, vctx->signature_offsets, vctx->message_start_offsets, vctx->out_h, vctx->use_non_default_stream); } LOG("Done iterations\n"); return NULL; } const static bool USE_CUDA_ALLOC = true; template<typename T> static void ed25519_alloc(T** ptr, size_t num) { if (USE_CUDA_ALLOC) { CUDA_CHK(hipHostMalloc(ptr, sizeof(T) * num)); } else { *ptr = (T*)calloc(sizeof(T), num); } } static void ed25519_free(void* ptr) { if (USE_CUDA_ALLOC) { CUDA_CHK(hipHostFree(ptr)); } else { free(ptr); } } int main(int argc, const char* argv[]) { int arg; bool verbose = false; for (arg = 1; arg < argc; arg++) { if (0 == strcmp(argv[arg], "-v")) { verbose = true; } else { break; } } if ((argc - arg) != 6) { printf("usage: %s [-v] <num_signatures> <num_elems> <num_sigs_per_packet> <num_threads> <num_iterations> <use_non_default_stream>\n", argv[0]); return 1; } ed25519_set_verbose(verbose); int num_signatures_per_elem = strtol(argv[arg++], NULL, 10); if (num_signatures_per_elem <= 0) { printf("num_signatures_per_elem should be > 0! %d\n", num_signatures_per_elem); return 1; } int num_elems = strtol(argv[arg++], NULL, 10); if (num_elems <= 0) { printf("num_elems should be > 0! %d\n", num_elems); return 1; } int num_sigs_per_packet = strtol(argv[arg++], NULL, 10); if (num_sigs_per_packet <= 0) { printf("num_sigs_per_packet should be > 0! %d\n", num_sigs_per_packet); return 1; } int num_threads = strtol(argv[arg++], NULL, 10); if (num_threads <= 0) { printf("num_threads should be > 0! %d\n", num_threads); return 1; } int num_iterations = strtol(argv[arg++], NULL, 10); if (num_iterations <= 0) { printf("num_iterations should be > 0! %d\n", num_iterations); return 1; } uint8_t use_non_default_stream = (uint8_t)strtol(argv[arg++], NULL, 10); if (use_non_default_stream != 0 && use_non_default_stream != 1) { printf("non_default_stream should be 0 or 1! 
%d\n", use_non_default_stream); return 1; } LOG("streamer size: %zu elems size: %zu\n", sizeof(streamer_Packet), sizeof(gpu_Elems)); std::vector<verify_cpu_ctx_t> vctx = std::vector<verify_cpu_ctx_t>(num_threads); // Host allocate unsigned char* seed_h = (unsigned char*)calloc(num_signatures_per_elem * SEED_SIZE, sizeof(uint32_t)); unsigned char* private_key_h = (unsigned char*)calloc(num_signatures_per_elem, PRIV_KEY_SIZE); unsigned char message_h[] = "abcd1234"; int message_h_len = strlen((char*)message_h); uint32_t total_signatures = num_elems * num_signatures_per_elem; uint32_t* message_lens = NULL; ed25519_alloc(&message_lens, total_signatures); uint32_t* signature_offsets = NULL; ed25519_alloc(&signature_offsets, total_signatures); uint32_t* public_key_offsets = NULL; ed25519_alloc(&public_key_offsets, total_signatures); uint32_t* message_start_offsets = NULL; ed25519_alloc(&message_start_offsets, total_signatures); for (uint32_t i = 0; i < total_signatures; i++) { uint32_t base_offset = i * sizeof(streamer_Packet); signature_offsets[i] = base_offset + offsetof(packet_t, signature); public_key_offsets[i] = base_offset + offsetof(packet_t, public_key); message_start_offsets[i] = base_offset + offsetof(packet_t, message); message_lens[i] = message_h_len; } for (int i = 0; i < num_threads; i++) { vctx[i].message_lens = message_lens; vctx[i].signature_offsets = signature_offsets; vctx[i].public_key_offsets = public_key_offsets; vctx[i].message_start_offsets = message_start_offsets; vctx[i].num_iterations = num_iterations; vctx[i].use_non_default_stream = use_non_default_stream; } streamer_Packet* packets_h = NULL; ed25519_alloc(&packets_h, num_signatures_per_elem); uint32_t total_packets = 0; gpu_Elems* elems_h = NULL; ed25519_alloc(&elems_h, num_elems); for (int i = 0; i < num_elems; i++) { elems_h[i].num = num_signatures_per_elem; elems_h[i].elems = (uint8_t*)&packets_h[0]; total_packets += num_signatures_per_elem; } LOG("initing signatures..\n"); for (int i = 0; i < num_signatures_per_elem; i++) { packet_t* packet = (packet_t*)packets_h[i].data; memcpy(packet->message, message_h, message_h_len); LOG("message_len: %d\n", message_h_len); } for (uint32_t i = 0; i < total_signatures; i++) { LOG("sig_offset: %d pub_key_offset: %d message_start_offset: %d message_len: %d\n", signature_offsets[i], public_key_offsets[i], message_start_offsets[i], message_lens[i]); } int out_size = total_signatures * sizeof(uint8_t); for (int i = 0; i < num_threads; i++) { vctx[i].num_elems = num_elems; ed25519_alloc(&vctx[i].out_h, out_size); vctx[i].elems_h = &elems_h[0]; vctx[i].total_signatures = total_signatures; vctx[i].total_packets = total_packets; } LOG("creating seed..\n"); int ret = ed25519_create_seed(seed_h); LOG("create_seed: %d\n", ret); packet_t* first_packet_h = (packet_t*)packets_h[0].data; ed25519_create_keypair(first_packet_h->public_key, private_key_h, seed_h); ed25519_sign(first_packet_h->signature, first_packet_h->message, message_h_len, first_packet_h->public_key, private_key_h); ret = ed25519_verify(first_packet_h->signature, message_h, message_h_len, first_packet_h->public_key); LOG("verify: %d\n", ret); for (int i = 1; i < num_signatures_per_elem; i++) { packet_t* packet_h = (packet_t*)packets_h[i].data; memcpy(packet_h->signature, first_packet_h->signature, SIG_SIZE); memcpy(packet_h->public_key, first_packet_h->public_key, PUB_KEY_SIZE); } for (int i = 0; i < num_signatures_per_elem; i++ ) { packet_t* packet_h = (packet_t*)packets_h[i].data; unsigned char* sig_ptr = 
packet_h->signature; unsigned char* messages_ptr = packet_h->message; LOG("sig:"); print_dwords(sig_ptr, SIG_SIZE); LOG("\nmessage: "); print_dwords(messages_ptr, message_h_len); LOG("\n\n"); } LOG("\n"); std::vector<pthread_t> threads = std::vector<pthread_t>(num_threads); pthread_attr_t attr; ret = pthread_attr_init(&attr); if (ret != 0) { LOG("ERROR: pthread_attr_init: %d\n", ret); return 1; } perftime_t start, end; get_time(&start); for (int i = 0; i < num_threads; i++) { ret = pthread_create(&threads[i], &attr, verify_proc, &vctx[i]); if (ret != 0) { LOG("ERROR: pthread_create: %d\n", ret); return 1; } } void* res = NULL; for (int i = 0; i < num_threads; i++) { ret = pthread_join(threads[i], &res); if (ret != 0) { LOG("ERROR: pthread_join: %d\n", ret); return 1; } } get_time(&end); int total = (num_threads * total_signatures * num_iterations); double diff = get_diff(&start, &end); printf("time diff: %f total: %d sigs/sec: %f\n", diff, total, (double)total / (diff / 1e6)); for (int thread = 0; thread < num_threads; thread++) { LOG("ret:\n"); bool verify_failed = false; for (int i = 0; i < out_size / (int)sizeof(uint8_t); i++) { LOG("%x ", vctx[thread].out_h[i]); if (vctx[thread].out_h[i] != 1) { verify_failed = true; } } LOG("\n"); fflush(stdout); assert(verify_failed == false); } ed25519_free(elems_h); ed25519_free(packets_h); ed25519_free(message_lens); ed25519_free(signature_offsets); ed25519_free(public_key_offsets); ed25519_free(message_start_offsets); for (int thread = 0; thread < num_threads; thread++) { ed25519_free(vctx[thread].out_h); } free(seed_h); free(private_key_h); ed25519_free_gpu_mem(); return 0; }
e24cc0b238611f7c3462698f0b0a10624dfad3aa.cu
#include <stdio.h> #include "ed25519.h" #include <inttypes.h> #include <assert.h> #include <vector> #include <pthread.h> #include "gpu_common.h" #include "gpu_ctx.h" #define USE_CLOCK_GETTIME #include "perftime.h" #define PACKET_SIZE 512 typedef struct { size_t size; uint64_t num_retransmits; uint16_t addr[8]; uint16_t port; bool v6; } streamer_Meta; typedef struct { uint8_t data[PACKET_SIZE]; streamer_Meta meta; } streamer_Packet; void print_dwords(unsigned char* ptr, int size) { for (int j = 0; j < (size)/(int)sizeof(uint32_t); j++) { LOG("%x ", ((uint32_t*)ptr)[j]); } } typedef struct { uint8_t signature[SIG_SIZE]; uint8_t public_key[PUB_KEY_SIZE]; uint32_t message_len; uint8_t message[8]; } packet_t; typedef struct { gpu_Elems* elems_h; uint32_t num_elems; uint32_t total_packets; uint32_t total_signatures; uint32_t* message_lens; uint32_t* public_key_offsets; uint32_t* signature_offsets; uint32_t* message_start_offsets; uint8_t* out_h; int num_iterations; uint8_t use_non_default_stream; } verify_cpu_ctx_t; static void* verify_proc(void* ctx) { verify_cpu_ctx_t* vctx = (verify_cpu_ctx_t*)ctx; LOG("Start iterations\n"); for (int i = 0; i < vctx->num_iterations; i++) { ed25519_verify_many(&vctx->elems_h[0], vctx->num_elems, sizeof(streamer_Packet), vctx->total_packets, vctx->total_signatures, vctx->message_lens, vctx->public_key_offsets, vctx->signature_offsets, vctx->message_start_offsets, vctx->out_h, vctx->use_non_default_stream); } LOG("Done iterations\n"); return NULL; } const static bool USE_CUDA_ALLOC = true; template<typename T> static void ed25519_alloc(T** ptr, size_t num) { if (USE_CUDA_ALLOC) { CUDA_CHK(cudaMallocHost(ptr, sizeof(T) * num)); } else { *ptr = (T*)calloc(sizeof(T), num); } } static void ed25519_free(void* ptr) { if (USE_CUDA_ALLOC) { CUDA_CHK(cudaFreeHost(ptr)); } else { free(ptr); } } int main(int argc, const char* argv[]) { int arg; bool verbose = false; for (arg = 1; arg < argc; arg++) { if (0 == strcmp(argv[arg], "-v")) { verbose = true; } else { break; } } if ((argc - arg) != 6) { printf("usage: %s [-v] <num_signatures> <num_elems> <num_sigs_per_packet> <num_threads> <num_iterations> <use_non_default_stream>\n", argv[0]); return 1; } ed25519_set_verbose(verbose); int num_signatures_per_elem = strtol(argv[arg++], NULL, 10); if (num_signatures_per_elem <= 0) { printf("num_signatures_per_elem should be > 0! %d\n", num_signatures_per_elem); return 1; } int num_elems = strtol(argv[arg++], NULL, 10); if (num_elems <= 0) { printf("num_elems should be > 0! %d\n", num_elems); return 1; } int num_sigs_per_packet = strtol(argv[arg++], NULL, 10); if (num_sigs_per_packet <= 0) { printf("num_sigs_per_packet should be > 0! %d\n", num_sigs_per_packet); return 1; } int num_threads = strtol(argv[arg++], NULL, 10); if (num_threads <= 0) { printf("num_threads should be > 0! %d\n", num_threads); return 1; } int num_iterations = strtol(argv[arg++], NULL, 10); if (num_iterations <= 0) { printf("num_iterations should be > 0! %d\n", num_iterations); return 1; } uint8_t use_non_default_stream = (uint8_t)strtol(argv[arg++], NULL, 10); if (use_non_default_stream != 0 && use_non_default_stream != 1) { printf("non_default_stream should be 0 or 1! 
%d\n", use_non_default_stream); return 1; } LOG("streamer size: %zu elems size: %zu\n", sizeof(streamer_Packet), sizeof(gpu_Elems)); std::vector<verify_cpu_ctx_t> vctx = std::vector<verify_cpu_ctx_t>(num_threads); // Host allocate unsigned char* seed_h = (unsigned char*)calloc(num_signatures_per_elem * SEED_SIZE, sizeof(uint32_t)); unsigned char* private_key_h = (unsigned char*)calloc(num_signatures_per_elem, PRIV_KEY_SIZE); unsigned char message_h[] = "abcd1234"; int message_h_len = strlen((char*)message_h); uint32_t total_signatures = num_elems * num_signatures_per_elem; uint32_t* message_lens = NULL; ed25519_alloc(&message_lens, total_signatures); uint32_t* signature_offsets = NULL; ed25519_alloc(&signature_offsets, total_signatures); uint32_t* public_key_offsets = NULL; ed25519_alloc(&public_key_offsets, total_signatures); uint32_t* message_start_offsets = NULL; ed25519_alloc(&message_start_offsets, total_signatures); for (uint32_t i = 0; i < total_signatures; i++) { uint32_t base_offset = i * sizeof(streamer_Packet); signature_offsets[i] = base_offset + offsetof(packet_t, signature); public_key_offsets[i] = base_offset + offsetof(packet_t, public_key); message_start_offsets[i] = base_offset + offsetof(packet_t, message); message_lens[i] = message_h_len; } for (int i = 0; i < num_threads; i++) { vctx[i].message_lens = message_lens; vctx[i].signature_offsets = signature_offsets; vctx[i].public_key_offsets = public_key_offsets; vctx[i].message_start_offsets = message_start_offsets; vctx[i].num_iterations = num_iterations; vctx[i].use_non_default_stream = use_non_default_stream; } streamer_Packet* packets_h = NULL; ed25519_alloc(&packets_h, num_signatures_per_elem); uint32_t total_packets = 0; gpu_Elems* elems_h = NULL; ed25519_alloc(&elems_h, num_elems); for (int i = 0; i < num_elems; i++) { elems_h[i].num = num_signatures_per_elem; elems_h[i].elems = (uint8_t*)&packets_h[0]; total_packets += num_signatures_per_elem; } LOG("initing signatures..\n"); for (int i = 0; i < num_signatures_per_elem; i++) { packet_t* packet = (packet_t*)packets_h[i].data; memcpy(packet->message, message_h, message_h_len); LOG("message_len: %d\n", message_h_len); } for (uint32_t i = 0; i < total_signatures; i++) { LOG("sig_offset: %d pub_key_offset: %d message_start_offset: %d message_len: %d\n", signature_offsets[i], public_key_offsets[i], message_start_offsets[i], message_lens[i]); } int out_size = total_signatures * sizeof(uint8_t); for (int i = 0; i < num_threads; i++) { vctx[i].num_elems = num_elems; ed25519_alloc(&vctx[i].out_h, out_size); vctx[i].elems_h = &elems_h[0]; vctx[i].total_signatures = total_signatures; vctx[i].total_packets = total_packets; } LOG("creating seed..\n"); int ret = ed25519_create_seed(seed_h); LOG("create_seed: %d\n", ret); packet_t* first_packet_h = (packet_t*)packets_h[0].data; ed25519_create_keypair(first_packet_h->public_key, private_key_h, seed_h); ed25519_sign(first_packet_h->signature, first_packet_h->message, message_h_len, first_packet_h->public_key, private_key_h); ret = ed25519_verify(first_packet_h->signature, message_h, message_h_len, first_packet_h->public_key); LOG("verify: %d\n", ret); for (int i = 1; i < num_signatures_per_elem; i++) { packet_t* packet_h = (packet_t*)packets_h[i].data; memcpy(packet_h->signature, first_packet_h->signature, SIG_SIZE); memcpy(packet_h->public_key, first_packet_h->public_key, PUB_KEY_SIZE); } for (int i = 0; i < num_signatures_per_elem; i++ ) { packet_t* packet_h = (packet_t*)packets_h[i].data; unsigned char* sig_ptr = 
packet_h->signature; unsigned char* messages_ptr = packet_h->message; LOG("sig:"); print_dwords(sig_ptr, SIG_SIZE); LOG("\nmessage: "); print_dwords(messages_ptr, message_h_len); LOG("\n\n"); } LOG("\n"); std::vector<pthread_t> threads = std::vector<pthread_t>(num_threads); pthread_attr_t attr; ret = pthread_attr_init(&attr); if (ret != 0) { LOG("ERROR: pthread_attr_init: %d\n", ret); return 1; } perftime_t start, end; get_time(&start); for (int i = 0; i < num_threads; i++) { ret = pthread_create(&threads[i], &attr, verify_proc, &vctx[i]); if (ret != 0) { LOG("ERROR: pthread_create: %d\n", ret); return 1; } } void* res = NULL; for (int i = 0; i < num_threads; i++) { ret = pthread_join(threads[i], &res); if (ret != 0) { LOG("ERROR: pthread_join: %d\n", ret); return 1; } } get_time(&end); int total = (num_threads * total_signatures * num_iterations); double diff = get_diff(&start, &end); printf("time diff: %f total: %d sigs/sec: %f\n", diff, total, (double)total / (diff / 1e6)); for (int thread = 0; thread < num_threads; thread++) { LOG("ret:\n"); bool verify_failed = false; for (int i = 0; i < out_size / (int)sizeof(uint8_t); i++) { LOG("%x ", vctx[thread].out_h[i]); if (vctx[thread].out_h[i] != 1) { verify_failed = true; } } LOG("\n"); fflush(stdout); assert(verify_failed == false); } ed25519_free(elems_h); ed25519_free(packets_h); ed25519_free(message_lens); ed25519_free(signature_offsets); ed25519_free(public_key_offsets); ed25519_free(message_start_offsets); for (int thread = 0; thread < num_threads; thread++) { ed25519_free(vctx[thread].out_h); } free(seed_h); free(private_key_h); ed25519_free_gpu_mem(); return 0; }
cafb4689ab3d3424f6f53be3849ceb3555c1ea18.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_vector.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <hipcub/hipcub.hpp> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case mask_state::UNALLOCATED: return 0; case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT; case mask_state::ALL_NULL: return size; case mask_state::ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>(number_of_bits, detail::size_in_bits<bitmask_type>()); } namespace detail { // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != mask_state::UNINITIALIZED) { uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00; CUDA_TRY(hipMemsetAsync( static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream.value())); } return mask; } namespace { __global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination, size_type begin_bit, size_type end_bit, bool valid, size_type number_of_mask_words) { auto x = destination + word_index(begin_bit); const auto last_word = word_index(end_bit) - word_index(begin_bit); bitmask_type fill_value = (valid == true) ? 
0xffffffff : 0x00; for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { if (destination_word_index == 0 || destination_word_index == last_word) { bitmask_type mask = ~bitmask_type{0}; if (destination_word_index == 0) { mask = ~(set_least_significant_bits(intra_word_index(begin_bit))); } if (destination_word_index == last_word) { mask = mask & set_least_significant_bits(intra_word_index(end_bit)); } x[destination_word_index] = (valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask; } else { x[destination_word_index] = fill_value; } } } } // namespace // Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true, // or null, otherwise; void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit < end_bit, "Invalid bit range."); if (bitmask != nullptr) { auto number_of_mask_words = num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>(); cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( set_null_mask_kernel), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(), static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words); CHECK_CUDA(stream.value()); } } } // namespace detail // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, rmm::mr::device_memory_resource *mr) { return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr); } // Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true, // or null, otherwise; void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid) { return detail::set_null_mask(bitmask, begin_bit, end_bit, valid); } namespace { /** * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. * @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range */ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? 
first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = hipcub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /** * For each range `[first_bit_indices[i], last_bit_indices[i])` * (where 0 <= i < `num_ranges`), count the number of bits set outside the range * in the boundary words (i.e. words that include either * `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and * subtract the count from the range's null count. * * Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`. * * @param[in] bitmask The bitmask whose non-zero bits outside the range in the * boundary words will be counted. * @param[in] num_ranges The number of ranges * @param[in] first_bit_indices The indices (inclusive) of the first bit in each * range * @param[in] last_bit_indices The indices (exclusive) of the last bit in each * range * @param[in,out] null_counts The number of non-zero bits in each range to be * updated */ template <typename OffsetIterator, typename OutputIterator> __global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask, size_type num_ranges, OffsetIterator first_bit_indices, OffsetIterator last_bit_indices, OutputIterator null_counts) { constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()}; cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x; cudf::size_type range_id = tid; while (range_id < num_ranges) { size_type const first_bit_index = *(first_bit_indices + range_id); size_type const last_bit_index = *(last_bit_indices + range_id); size_type delta = 0; size_type num_slack_bits = 0; // compute delta due to the preceding bits in the first word in the range num_slack_bits = intra_word_index(first_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(first_bit_index)]; bitmask_type slack_mask = set_least_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } // compute delta due to the following bits in the last word in the range num_slack_bits = (last_bit_index % word_size_in_bits) == 0 ? 0 : word_size_in_bits - intra_word_index(last_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(last_bit_index)]; bitmask_type slack_mask = set_most_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } size_type updated_null_count = *(null_counts + range_id) + delta; *(null_counts + range_id) = updated_null_count; range_id += blockDim.x * gridDim.x; } } /** * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. * * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. 
* * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done * @param number_of_mask_words The number of `cudf::bitmask_type` words to copy */ // TODO: Also make binops test that uses offset in column_view __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { destination[destination_word_index] = detail::get_mask_offset_word( source, destination_word_index, source_begin_bit, source_end_bit); } } /** * @brief Computes the bitwise AND of an array of bitmasks * * @param destination The bitmask to write result into * @param source Array of source mask pointers. All masks must be of same size * @param begin_bit Array of offsets into corresponding @p source masks. * Must be same size as source array * @param num_sources Number of masks in @p source array * @param source_size Number of bits in each mask in @p source * @param number_of_mask_words The number of words of type bitmask_type to copy */ __global__ void offset_bitmask_and(bitmask_type *__restrict__ destination, bitmask_type const *const *__restrict__ source, size_type const *__restrict__ begin_bit, size_type num_sources, size_type source_size, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { bitmask_type destination_word = ~bitmask_type{0}; // All bits 1 for (size_type i = 0; i < num_sources; i++) { destination_word &= detail::get_mask_offset_word( source[i], destination_word_index, begin_bit[i], begin_bit[i] + source_size); } destination[destination_word_index] = destination_word; } } // convert [first_bit_index,last_bit_index) to // [first_word_index,last_word_index) struct to_word_index : public thrust::unary_function<size_type, size_type> { const bool _inclusive = false; size_type const *const _d_bit_indices = nullptr; /** * @brief Constructor of a functor that converts bit indices to bitmask word * indices. * * @param[in] inclusive Flag that indicates whether bit indices are inclusive * or exclusive. * @param[in] d_bit_indices Pointer to an array of bit indices */ __host__ to_word_index(bool inclusive, size_type const *d_bit_indices) : _inclusive(inclusive), _d_bit_indices(d_bit_indices) { } __device__ size_type operator()(const size_type &i) const { auto bit_index = _d_bit_indices[i]; return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 
0 : 1); } }; } // namespace namespace detail { // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( copy_offset_bitmask), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(), static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CHECK_CUDA(stream.value()); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Inplace Bitwise AND of the masks void inplace_bitmask_and(bitmask_type *dest_mask, std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(std::all_of(begin_bits.begin(), begin_bits.end(), [](auto b) { return b >= 0; }), "Invalid range."); CUDF_EXPECTS(mask_size > 0, "Invalid bit range."); CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }), "Mask pointer cannot be null"); auto number_of_mask_words = num_bitmask_words(mask_size); rmm::device_vector<bitmask_type const *> d_masks(masks); rmm::device_vector<size_type> d_begin_bits(begin_bits); cudf::detail::grid_1d config(number_of_mask_words, 256); hipLaunchKernelGGL(( offset_bitmask_and), dim3(config.num_blocks), dim3(config.num_threads_per_block), 0, stream.value(), dest_mask, d_masks.data().get(), d_begin_bits.data().get(), d_masks.size(), mask_size, number_of_mask_words); CHECK_CUDA(stream.value()); } // Bitwise AND of the masks rmm::device_buffer bitmask_and(std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(mask_size); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; inplace_bitmask_and( static_cast<bitmask_type *>(dest_mask.data()), masks, begin_bits, mask_size, stream, mr); return dest_mask; } cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = num_bitmask_words(num_bits_to_count); constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_words, 
block_size); rmm::device_scalar<size_type> non_zero_count(0, stream); hipLaunchKernelGGL(( count_set_bits_kernel<block_size>) , dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(stream); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, rmm::cuda_stream_view stream) { CUDF_EXPECTS(indices.size() % 2 == 0, "Array of indices needs to have an even number of elements."); for (size_t i = 0; i < indices.size() / 2; i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); } if (indices.empty()) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { std::vector<size_type> ret(indices.size() / 2); for (size_t i = 0; i < indices.size() / 2; i++) { ret[i] = indices[2 * i + 1] - indices[2 * i]; } return ret; } size_type num_ranges = indices.size() / 2; thrust::host_vector<size_type> h_first_indices(num_ranges); thrust::host_vector<size_type> h_last_indices(num_ranges); thrust::stable_partition_copy(thrust::seq, std::begin(indices), std::end(indices), thrust::make_counting_iterator(0), h_first_indices.begin(), h_last_indices.begin(), [](auto i) { return (i % 2) == 0; }); rmm::device_vector<size_type> d_first_indices = h_first_indices; rmm::device_vector<size_type> d_last_indices = h_last_indices; rmm::device_vector<size_type> d_null_counts(num_ranges, 0); auto word_num_set_bits = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); }); auto first_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(true, d_first_indices.data().get())); auto last_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as hipcub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. 
to_word_index(false, d_last_indices.data().get())); // first allocate temporary memroy size_t temp_storage_bytes{0}; CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream.value())); rmm::device_buffer d_temp_storage(temp_storage_bytes, stream); // second perform segmented reduction CUDA_TRY(hipcub::DeviceSegmentedReduce::Sum(d_temp_storage.data(), temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream.value())); CHECK_CUDA(stream.value()); // third, adjust counts in segment boundaries (if segments are not // word-aligned) constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_ranges, block_size); hipLaunchKernelGGL(( subtract_set_bits_range_boundaries_kerenel), dim3(grid.num_blocks), dim3(grid.num_threads_per_block), 0, stream.value(), bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin()); CHECK_CUDA(stream.value()); std::vector<size_type> ret(num_ranges); CUDA_TRY(hipMemcpyAsync(ret.data(), d_null_counts.data().get(), num_ranges * sizeof(size_type), hipMemcpyDeviceToHost, stream.value())); stream.synchronize(); // now ret is valid. return ret; } std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, rmm::cuda_stream_view stream) { if (indices.empty()) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { return std::vector<size_type>(indices.size() / 2, 0); } auto ret = segmented_count_set_bits(bitmask, indices, stream); for (size_t i = 0; i < ret.size(); i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; ret[i] = (end - begin) - ret[i]; } return ret; } // Returns the bitwise AND of the null masks of all columns in the table view rmm::device_buffer bitmask_and(table_view const &view, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; } std::vector<bitmask_type const *> masks; std::vector<size_type> offsets; for (auto &&col : view) { if (col.nullable()) { masks.push_back(col.null_mask()); offsets.push_back(col.offset()); } } if (masks.size() > 0) { return cudf::detail::bitmask_and(masks, offsets, view.num_rows(), stream, mr); } return null_mask; } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_unset_bits(bitmask, start, stop); } // Count non-zero bits in the specified ranges std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default); } // Count zero bits in the specified ranges std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const 
*mask, size_type begin_bit, size_type end_bit, rmm::mr::device_memory_resource *mr) { return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr); } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, rmm::mr::device_memory_resource *mr) { return detail::copy_bitmask(view, rmm::cuda_stream_default, mr); } rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr) { return detail::bitmask_and(view, rmm::cuda_stream_default, mr); } } // namespace cudf
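The hipified null-mask translation unit above exposes a handful of host-callable helpers (bitmask_allocation_size_bytes, create_null_mask, count_set_bits). The standalone sketch below, which is not part of the generated pair, shows how they fit together; it assumes these functions are declared in cudf/null_mask.hpp and that the header defaults the memory-resource argument of create_null_mask, which is not visible in the file itself.

// Usage sketch only: exercises the public helpers defined above.
#include <cudf/null_mask.hpp>
#include <cudf/types.hpp>
#include <rmm/device_buffer.hpp>
#include <cstddef>

void null_mask_demo()
{
  cudf::size_type const num_rows = 1000;

  // Padded allocation size in bytes for a 1000-bit mask; the padding boundary
  // is passed explicitly so the two-argument overload defined above is used.
  std::size_t const bytes = cudf::bitmask_allocation_size_bytes(num_rows, 64);

  // Allocate a mask with every bit set, then count the valid bits back.
  // Assumes the header supplies a default memory resource for the third argument.
  rmm::device_buffer mask =
    cudf::create_null_mask(num_rows, cudf::mask_state::ALL_VALID);
  cudf::size_type const valid = cudf::count_set_bits(
    static_cast<cudf::bitmask_type const *>(mask.data()), 0, num_rows);

  // For an ALL_VALID mask, valid == num_rows and bytes is a multiple of 64.
  (void)bytes;
  (void)valid;
}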
cafb4689ab3d3424f6f53be3849ceb3555c1ea18.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/integer_utils.hpp> #include <cudf/null_mask.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_scalar.hpp> #include <rmm/device_vector.hpp> #include <rmm/mr/device/device_memory_resource.hpp> #include <thrust/binary_search.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/extrema.h> #include <cub/cub.cuh> #include <algorithm> #include <numeric> #include <type_traits> namespace cudf { size_type state_null_count(mask_state state, size_type size) { switch (state) { case mask_state::UNALLOCATED: return 0; case mask_state::UNINITIALIZED: return UNKNOWN_NULL_COUNT; case mask_state::ALL_NULL: return size; case mask_state::ALL_VALID: return 0; default: CUDF_FAIL("Invalid null mask state."); } } // Computes required allocation size of a bitmask std::size_t bitmask_allocation_size_bytes(size_type number_of_bits, std::size_t padding_boundary) { CUDF_EXPECTS(padding_boundary > 0, "Invalid padding boundary"); auto necessary_bytes = cudf::util::div_rounding_up_safe<size_type>(number_of_bits, CHAR_BIT); auto padded_bytes = padding_boundary * cudf::util::div_rounding_up_safe<size_type>( necessary_bytes, padding_boundary); return padded_bytes; } // Computes number of *actual* bitmask_type elements needed size_type num_bitmask_words(size_type number_of_bits) { return cudf::util::div_rounding_up_safe<size_type>(number_of_bits, detail::size_in_bits<bitmask_type>()); } namespace detail { // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { size_type mask_size{0}; if (state != mask_state::UNALLOCATED) { mask_size = bitmask_allocation_size_bytes(size); } rmm::device_buffer mask(mask_size, stream, mr); if (state != mask_state::UNINITIALIZED) { uint8_t fill_value = (state == mask_state::ALL_VALID) ? 0xff : 0x00; CUDA_TRY(cudaMemsetAsync( static_cast<bitmask_type *>(mask.data()), fill_value, mask_size, stream.value())); } return mask; } namespace { __global__ void set_null_mask_kernel(bitmask_type *__restrict__ destination, size_type begin_bit, size_type end_bit, bool valid, size_type number_of_mask_words) { auto x = destination + word_index(begin_bit); const auto last_word = word_index(end_bit) - word_index(begin_bit); bitmask_type fill_value = (valid == true) ? 
0xffffffff : 0x00; for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { if (destination_word_index == 0 || destination_word_index == last_word) { bitmask_type mask = ~bitmask_type{0}; if (destination_word_index == 0) { mask = ~(set_least_significant_bits(intra_word_index(begin_bit))); } if (destination_word_index == last_word) { mask = mask & set_least_significant_bits(intra_word_index(end_bit)); } x[destination_word_index] = (valid == true) ? x[destination_word_index] | mask : x[destination_word_index] & ~mask; } else { x[destination_word_index] = fill_value; } } } } // namespace // Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true, // or null, otherwise; void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid, rmm::cuda_stream_view stream) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit < end_bit, "Invalid bit range."); if (bitmask != nullptr) { auto number_of_mask_words = num_bitmask_words(end_bit) - begin_bit / detail::size_in_bits<bitmask_type>(); cudf::detail::grid_1d config(number_of_mask_words, 256); set_null_mask_kernel<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>( static_cast<bitmask_type *>(bitmask), begin_bit, end_bit, valid, number_of_mask_words); CHECK_CUDA(stream.value()); } } } // namespace detail // Create a device_buffer for a null mask rmm::device_buffer create_null_mask(size_type size, mask_state state, rmm::mr::device_memory_resource *mr) { return detail::create_null_mask(size, state, rmm::cuda_stream_default, mr); } // Set pre-allocated null mask of given bit range [begin_bit, end_bit) to valid, if valid==true, // or null, otherwise; void set_null_mask(bitmask_type *bitmask, size_type begin_bit, size_type end_bit, bool valid) { return detail::set_null_mask(bitmask, begin_bit, end_bit, valid); } namespace { /** * @brief Counts the number of non-zero bits in a bitmask in the range * `[first_bit_index, last_bit_index]`. * * Expects `0 <= first_bit_index <= last_bit_index`. * * @param[in] bitmask The bitmask whose non-zero bits will be counted. * @param[in] first_bit_index The index (inclusive) of the first bit to count * @param[in] last_bit_index The index (inclusive) of the last bit to count * @param[out] global_count The number of non-zero bits in the specified range */ template <size_type block_size> __global__ void count_set_bits_kernel(bitmask_type const *bitmask, size_type first_bit_index, size_type last_bit_index, size_type *global_count) { constexpr auto const word_size{detail::size_in_bits<bitmask_type>()}; auto const first_word_index{word_index(first_bit_index)}; auto const last_word_index{word_index(last_bit_index)}; auto const tid = threadIdx.x + blockIdx.x * blockDim.x; auto thread_word_index = tid + first_word_index; size_type thread_count{0}; // First, just count the bits in all words while (thread_word_index <= last_word_index) { thread_count += __popc(bitmask[thread_word_index]); thread_word_index += blockDim.x * gridDim.x; } // Subtract any slack bits counted from the first and last word // Two threads handle this -- one for first word, one for last if (tid < 2) { bool const first{tid == 0}; bool const last{not first}; size_type bit_index = (first) ? first_bit_index : last_bit_index; size_type word_index = (first) ? 
first_word_index : last_word_index; size_type num_slack_bits = bit_index % word_size; if (last) { num_slack_bits = word_size - num_slack_bits - 1; } if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index]; auto slack_mask = (first) ? set_least_significant_bits(num_slack_bits) : set_most_significant_bits(num_slack_bits); thread_count -= __popc(word & slack_mask); } } using BlockReduce = cub::BlockReduce<size_type, block_size>; __shared__ typename BlockReduce::TempStorage temp_storage; size_type block_count{BlockReduce(temp_storage).Sum(thread_count)}; if (threadIdx.x == 0) { atomicAdd(global_count, block_count); } } /** * For each range `[first_bit_indices[i], last_bit_indices[i])` * (where 0 <= i < `num_ranges`), count the number of bits set outside the range * in the boundary words (i.e. words that include either * `first_bit_indices[i]'th` bit or `(last_bit_indices[i] - 1)'th` bit) and * subtract the count from the range's null count. * * Expects `0 <= first_bit_indices[i] <= last_bit_indices[i]`. * * @param[in] bitmask The bitmask whose non-zero bits outside the range in the * boundary words will be counted. * @param[in] num_ranges The number of ranges * @param[in] first_bit_indices The indices (inclusive) of the first bit in each * range * @param[in] last_bit_indices The indices (exclusive) of the last bit in each * range * @param[in,out] null_counts The number of non-zero bits in each range to be * updated */ template <typename OffsetIterator, typename OutputIterator> __global__ void subtract_set_bits_range_boundaries_kerenel(bitmask_type const *bitmask, size_type num_ranges, OffsetIterator first_bit_indices, OffsetIterator last_bit_indices, OutputIterator null_counts) { constexpr size_type const word_size_in_bits{detail::size_in_bits<bitmask_type>()}; cudf::size_type const tid = threadIdx.x + blockIdx.x * blockDim.x; cudf::size_type range_id = tid; while (range_id < num_ranges) { size_type const first_bit_index = *(first_bit_indices + range_id); size_type const last_bit_index = *(last_bit_indices + range_id); size_type delta = 0; size_type num_slack_bits = 0; // compute delta due to the preceding bits in the first word in the range num_slack_bits = intra_word_index(first_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(first_bit_index)]; bitmask_type slack_mask = set_least_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } // compute delta due to the following bits in the last word in the range num_slack_bits = (last_bit_index % word_size_in_bits) == 0 ? 0 : word_size_in_bits - intra_word_index(last_bit_index); if (num_slack_bits > 0) { bitmask_type word = bitmask[word_index(last_bit_index)]; bitmask_type slack_mask = set_most_significant_bits(num_slack_bits); delta -= __popc(word & slack_mask); } size_type updated_null_count = *(null_counts + range_id) + delta; *(null_counts + range_id) = updated_null_count; range_id += blockDim.x * gridDim.x; } } /** * @brief Copies the bits starting at the specified offset from a source * bitmask into the destination bitmask. * * Bit `i` in `destination` will be equal to bit `i + offset` from `source`. 
* * @param destination The mask to copy into * @param source The mask to copy from * @param source_begin_bit The offset into `source` from which to begin the copy * @param source_end_bit The offset into `source` till which copying is done * @param number_of_mask_words The number of `cudf::bitmask_type` words to copy */ // TODO: Also make binops test that uses offset in column_view __global__ void copy_offset_bitmask(bitmask_type *__restrict__ destination, bitmask_type const *__restrict__ source, size_type source_begin_bit, size_type source_end_bit, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { destination[destination_word_index] = detail::get_mask_offset_word( source, destination_word_index, source_begin_bit, source_end_bit); } } /** * @brief Computes the bitwise AND of an array of bitmasks * * @param destination The bitmask to write result into * @param source Array of source mask pointers. All masks must be of same size * @param begin_bit Array of offsets into corresponding @p source masks. * Must be same size as source array * @param num_sources Number of masks in @p source array * @param source_size Number of bits in each mask in @p source * @param number_of_mask_words The number of words of type bitmask_type to copy */ __global__ void offset_bitmask_and(bitmask_type *__restrict__ destination, bitmask_type const *const *__restrict__ source, size_type const *__restrict__ begin_bit, size_type num_sources, size_type source_size, size_type number_of_mask_words) { for (size_type destination_word_index = threadIdx.x + blockIdx.x * blockDim.x; destination_word_index < number_of_mask_words; destination_word_index += blockDim.x * gridDim.x) { bitmask_type destination_word = ~bitmask_type{0}; // All bits 1 for (size_type i = 0; i < num_sources; i++) { destination_word &= detail::get_mask_offset_word( source[i], destination_word_index, begin_bit[i], begin_bit[i] + source_size); } destination[destination_word_index] = destination_word; } } // convert [first_bit_index,last_bit_index) to // [first_word_index,last_word_index) struct to_word_index : public thrust::unary_function<size_type, size_type> { const bool _inclusive = false; size_type const *const _d_bit_indices = nullptr; /** * @brief Constructor of a functor that converts bit indices to bitmask word * indices. * * @param[in] inclusive Flag that indicates whether bit indices are inclusive * or exclusive. * @param[in] d_bit_indices Pointer to an array of bit indices */ __host__ to_word_index(bool inclusive, size_type const *d_bit_indices) : _inclusive(inclusive), _d_bit_indices(d_bit_indices) { } __device__ size_type operator()(const size_type &i) const { auto bit_index = _d_bit_indices[i]; return word_index(bit_index) + ((_inclusive || intra_word_index(bit_index) == 0) ? 
0 : 1); } }; } // namespace namespace detail { // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, size_type end_bit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); CUDF_EXPECTS(begin_bit >= 0, "Invalid range."); CUDF_EXPECTS(begin_bit <= end_bit, "Invalid bit range."); rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(end_bit - begin_bit); if ((mask == nullptr) || (num_bytes == 0)) { return dest_mask; } if (begin_bit == 0) { dest_mask = rmm::device_buffer{static_cast<void const *>(mask), num_bytes, stream, mr}; } else { auto number_of_mask_words = num_bitmask_words(end_bit - begin_bit); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; cudf::detail::grid_1d config(number_of_mask_words, 256); copy_offset_bitmask<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>( static_cast<bitmask_type *>(dest_mask.data()), mask, begin_bit, end_bit, number_of_mask_words); CHECK_CUDA(stream.value()); } return dest_mask; } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.nullable()) { null_mask = copy_bitmask(view.null_mask(), view.offset(), view.offset() + view.size(), stream, mr); } return null_mask; } // Inplace Bitwise AND of the masks void inplace_bitmask_and(bitmask_type *dest_mask, std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_EXPECTS(std::all_of(begin_bits.begin(), begin_bits.end(), [](auto b) { return b >= 0; }), "Invalid range."); CUDF_EXPECTS(mask_size > 0, "Invalid bit range."); CUDF_EXPECTS(std::all_of(masks.begin(), masks.end(), [](auto p) { return p != nullptr; }), "Mask pointer cannot be null"); auto number_of_mask_words = num_bitmask_words(mask_size); rmm::device_vector<bitmask_type const *> d_masks(masks); rmm::device_vector<size_type> d_begin_bits(begin_bits); cudf::detail::grid_1d config(number_of_mask_words, 256); offset_bitmask_and<<<config.num_blocks, config.num_threads_per_block, 0, stream.value()>>>( dest_mask, d_masks.data().get(), d_begin_bits.data().get(), d_masks.size(), mask_size, number_of_mask_words); CHECK_CUDA(stream.value()); } // Bitwise AND of the masks rmm::device_buffer bitmask_and(std::vector<bitmask_type const *> const &masks, std::vector<size_type> const &begin_bits, size_type mask_size, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { rmm::device_buffer dest_mask{}; auto num_bytes = bitmask_allocation_size_bytes(mask_size); dest_mask = rmm::device_buffer{num_bytes, stream, mr}; inplace_bitmask_and( static_cast<bitmask_type *>(dest_mask.data()), masks, begin_bits, mask_size, stream, mr); return dest_mask; } cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { if (nullptr == bitmask) { return 0; } CUDF_EXPECTS(start >= 0, "Invalid range."); CUDF_EXPECTS(start <= stop, "Invalid bit range."); std::size_t num_bits_to_count = stop - start; if (num_bits_to_count == 0) { return 0; } auto num_words = num_bitmask_words(num_bits_to_count); constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_words, block_size); rmm::device_scalar<size_type> non_zero_count(0, 
stream); count_set_bits_kernel<block_size> <<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( bitmask, start, stop - 1, non_zero_count.data()); return non_zero_count.value(stream); } cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop, rmm::cuda_stream_view stream = rmm::cuda_stream_default) { if (nullptr == bitmask) { return 0; } auto num_bits = (stop - start); return (num_bits - detail::count_set_bits(bitmask, start, stop, stream)); } std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, rmm::cuda_stream_view stream) { CUDF_EXPECTS(indices.size() % 2 == 0, "Array of indices needs to have an even number of elements."); for (size_t i = 0; i < indices.size() / 2; i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; CUDF_EXPECTS(begin >= 0, "Starting index cannot be negative."); CUDF_EXPECTS(end >= begin, "End index cannot be smaller than the starting index."); } if (indices.empty()) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { std::vector<size_type> ret(indices.size() / 2); for (size_t i = 0; i < indices.size() / 2; i++) { ret[i] = indices[2 * i + 1] - indices[2 * i]; } return ret; } size_type num_ranges = indices.size() / 2; thrust::host_vector<size_type> h_first_indices(num_ranges); thrust::host_vector<size_type> h_last_indices(num_ranges); thrust::stable_partition_copy(thrust::seq, std::begin(indices), std::end(indices), thrust::make_counting_iterator(0), h_first_indices.begin(), h_last_indices.begin(), [](auto i) { return (i % 2) == 0; }); rmm::device_vector<size_type> d_first_indices = h_first_indices; rmm::device_vector<size_type> d_last_indices = h_last_indices; rmm::device_vector<size_type> d_null_counts(num_ranges, 0); auto word_num_set_bits = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [bitmask] __device__(auto i) { return static_cast<size_type>(__popc(bitmask[i])); }); auto first_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. to_word_index(true, d_first_indices.data().get())); auto last_word_indices = thrust::make_transform_iterator( thrust::make_counting_iterator(0), // We cannot use lambda as cub::DeviceSegmentedReduce::Sum() requires // first_word_indices and last_word_indices to have the same type. 
to_word_index(false, d_last_indices.data().get())); // first allocate temporary memroy size_t temp_storage_bytes{0}; CUDA_TRY(cub::DeviceSegmentedReduce::Sum(nullptr, temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream.value())); rmm::device_buffer d_temp_storage(temp_storage_bytes, stream); // second perform segmented reduction CUDA_TRY(cub::DeviceSegmentedReduce::Sum(d_temp_storage.data(), temp_storage_bytes, word_num_set_bits, d_null_counts.begin(), num_ranges, first_word_indices, last_word_indices, stream.value())); CHECK_CUDA(stream.value()); // third, adjust counts in segment boundaries (if segments are not // word-aligned) constexpr size_type block_size{256}; cudf::detail::grid_1d grid(num_ranges, block_size); subtract_set_bits_range_boundaries_kerenel<<<grid.num_blocks, grid.num_threads_per_block, 0, stream.value()>>>( bitmask, num_ranges, d_first_indices.begin(), d_last_indices.begin(), d_null_counts.begin()); CHECK_CUDA(stream.value()); std::vector<size_type> ret(num_ranges); CUDA_TRY(cudaMemcpyAsync(ret.data(), d_null_counts.data().get(), num_ranges * sizeof(size_type), cudaMemcpyDeviceToHost, stream.value())); stream.synchronize(); // now ret is valid. return ret; } std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices, rmm::cuda_stream_view stream) { if (indices.empty()) { return std::vector<size_type>{}; } else if (bitmask == nullptr) { return std::vector<size_type>(indices.size() / 2, 0); } auto ret = segmented_count_set_bits(bitmask, indices, stream); for (size_t i = 0; i < ret.size(); i++) { auto begin = indices[i * 2]; auto end = indices[i * 2 + 1]; ret[i] = (end - begin) - ret[i]; } return ret; } // Returns the bitwise AND of the null masks of all columns in the table view rmm::device_buffer bitmask_and(table_view const &view, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource *mr) { CUDF_FUNC_RANGE(); rmm::device_buffer null_mask{0, stream, mr}; if (view.num_rows() == 0 or view.num_columns() == 0) { return null_mask; } std::vector<bitmask_type const *> masks; std::vector<size_type> offsets; for (auto &&col : view) { if (col.nullable()) { masks.push_back(col.null_mask()); offsets.push_back(col.offset()); } } if (masks.size() > 0) { return cudf::detail::bitmask_and(masks, offsets, view.num_rows(), stream, mr); } return null_mask; } } // namespace detail // Count non-zero bits in the specified range cudf::size_type count_set_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_set_bits(bitmask, start, stop); } // Count zero bits in the specified range cudf::size_type count_unset_bits(bitmask_type const *bitmask, size_type start, size_type stop) { CUDF_FUNC_RANGE(); return detail::count_unset_bits(bitmask, start, stop); } // Count non-zero bits in the specified ranges std::vector<size_type> segmented_count_set_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_set_bits(bitmask, indices, rmm::cuda_stream_default); } // Count zero bits in the specified ranges std::vector<size_type> segmented_count_unset_bits(bitmask_type const *bitmask, std::vector<size_type> const &indices) { CUDF_FUNC_RANGE(); return detail::segmented_count_unset_bits(bitmask, indices, rmm::cuda_stream_default); } // Create a bitmask from a specific range rmm::device_buffer copy_bitmask(bitmask_type const *mask, size_type begin_bit, 
size_type end_bit, rmm::mr::device_memory_resource *mr) { return detail::copy_bitmask(mask, begin_bit, end_bit, rmm::cuda_stream_default, mr); } // Create a bitmask from a column view rmm::device_buffer copy_bitmask(column_view const &view, rmm::mr::device_memory_resource *mr) { return detail::copy_bitmask(view, rmm::cuda_stream_default, mr); } rmm::device_buffer bitmask_and(table_view const &view, rmm::mr::device_memory_resource *mr) { return detail::bitmask_and(view, rmm::cuda_stream_default, mr); } } // namespace cudf
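Comparing the .cu file above with its hipified twin earlier in the pair, the systematic differences are the header names (cub vs. hipcub, cudaMemsetAsync vs. hipMemsetAsync) and the kernel-launch syntax. The minimal sketch below illustrates that launch rewrite with an invented kernel used purely for illustration; only the before/after launch forms reflect the pair above.

// Illustrative sketch of the CUDA -> HIP launch-syntax rewrite.
#include <cuda_runtime.h>

__global__ void fill_kernel(int *out, int value, int n)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i < n) { out[i] = value; }
}

void launch_fill(int *d_out, int n, cudaStream_t stream)
{
  int const block = 256;
  int const grid  = (n + block - 1) / block;

  // CUDA form, as written in the original .cu sources:
  fill_kernel<<<grid, block, 0, stream>>>(d_out, 42, n);

  // After hipify, the same launch appears as (stream becomes a hipStream_t):
  //   hipLaunchKernelGGL((fill_kernel), dim3(grid), dim3(block), 0, stream,
  //                      d_out, 42, n);
}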
d968eac8040eff4c5670de5d06a2ae8e1abbcb0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void mardas(int imax, int jmax, int kmax, int n1, int n2, int n3, int N, int iterations, float* V, float* g, float *R, float w, float h, int oddEven) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; for (int I = index_x; I < N; I +=stride_x) { V[I] = 1000; } } __global__ void poisson_solve_1it_cu(int imax, int jmax, int kmax, int n1, int n2, int n3, int N, float* V, float* g, float *R, float w, float h, int oddEven) { int I = threadIdx.x + blockDim.x * blockIdx.x; //int stride_x = blockDim.x * gridDim.x; //for (int I = index_x; I < N; I +=stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) return; if ((i+j+k)%2==oddEven) return; R[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i+1))]+ V[k + n3 * (j + n2 * (i-1))]+ V[k + n3 * (j+1 + n2 * (i))]+ V[k + n3 * (j-1 + n2 * (i))]+ V[k+1 + n3 * (j + n2 * (i))]+ V[k-1 + n3 * (j + n2 * (i))] ) / 6.0 - V[k + n3 * (j + n2 * (i))]- (h*h)*g[k + n3 * (j + n2 * (i))]/6.0; V[k + n3 * (j + n2 * (i))] += w*R[k + n3 * (j + n2 * (i))]; //} } __global__ void before_poisson_cu(int imax, int jmax, int kmax, float* ne,float* ni, float *g, float* g_temp, float *values) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; float qi = values[0]; float qe = values[1]; float w = values[10]; float eps0 = values[11]; float h = values[24]; for (int I=index_x; I<N; I+=stride_x) g_temp[I] = w*h*h*g[I]/6.; for (int I=index_x; I<N; I+=stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i >= 1 && i < imax-1 && j >= 1 && j < jmax-1 && k >= 1 && k < kmax-1) { g_temp[I] += w/6.*(g_temp[I-1]+g_temp[I-n3]+g_temp[I-n3*n2]); } else { g_temp[I] = 0; } } for ( I = index_x; I < N; I += stride_x) { k = I % n3; s1 = (I - k) / n3; j = s1 % n2; i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; g[k + n3 * (j + n2 * (i))]=-(ne[k + n3 * (j + n2 * (i))]*qe+ni[k + n3 * (j + n2 * (i))]*qi)/eps0; } } __global__ void after_poisson_cu(int imax, int jmax, int kmax, float *ne, float *ni , float *difxne, float *difyne, float *difxni , float *difyni, float *difxyne, float *difxyni, float *Exy, float *fexy , float *fixy, float *R, float *Ex, float *Ey , float *fex, float *fey, float *fix, float *fiy, float *V, float *difzne, float *difzni, float *Ez, float *fez, float *fiz , float *values, float *sf_temp) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int I = index_x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,N,s1; N=n1*n2*n3; float q = values[7]; float me = values[16]; float mi = values[17]; float nue = values[18]; float nui = values[19]; float denominator_e = values[20]; float denominator_i = values[21]; float dt = values[23]; float h = values[24]; float wce = values[25]; float wci = values[26]; float mue = values[27]; float mui = values[28]; float dife = values[29]; float difi = values[30]; k = I % n3; s1 = (I - k) / n3; j = s1 % n2; i = (s1 - j) / n2; if (i >= imax-1 || j >= jmax || k >= kmax) return; Ex[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k + n3 * (j + n2 * 
(i+1))])/h; difxne[k + n3 * (j + n2 * (i))]=(ne[k + n3 * (j + n2 * (i+1))]-ne[k + n3 * (j + n2 * (i))])/h; difxni[k + n3 * (j + n2 * (i))]=(ni[k + n3 * (j + n2 * (i+1))]-ni[k + n3 * (j + n2 * (i))])/h; Ey[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k + n3 * (j+1 + n2 * (i))])/h; difyne[k + n3 * (j + n2 * (i))]=(ne[k + n3 * (j+1 + n2 * (i))]-ne[k + n3 * (j + n2 * (i))])/h; difyni[k + n3 * (j + n2 * (i))]=(ni[k + n3 * (j+1 + n2 * (i))]-ni[k + n3 * (j + n2 * (i))])/h; Ez[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k+1 + n3 * (j + n2 * (i))])/h; difzne[k + n3 * (j + n2 * (i))]=(ne[k+1 + n3 * (j + n2 * (i))]-ne[k + n3 * (j + n2 * (i))])/h; difzni[k + n3 * (j + n2 * (i))]=(ni[k+1 + n3 * (j + n2 * (i))]-ni[k + n3 * (j + n2 * (i))])/h; // ----------------------------------------------------------------------------------------------- /* Since I am using mid points for Calculating electric field and density gradient, to calculate Ex at any point that I don't have it directly, the average over the neighboring points is used instead. these average variables are, exy, fexy, fixy, ...*/ // Calculating the average values of Ex and gradiant_x Exy[k + n3 * (j + n2 * (i))]= 0.0 ; difxyne[k + n3 * (j + n2 * (i))]=0.0; difxyni[k + n3 * (j + n2 * (i))]=0.0; Exy[k + n3 * (j + n2 * (i))]= 0.25*(Ex[k + n3 * (j + n2 * (i))]+Ex[k + n3 * (j+1 + n2 * (i))]+Ex[k + n3 * (j + n2 * (i-1))]+Ex[k + n3 * (j+1 + n2 * (i-1))]) ; difxyne[k + n3 * (j + n2 * (i))]=0.25*(difxne[k + n3 * (j + n2 * (i))]+difxne[k + n3 * (j+1 + n2 * (i))]+difxne[k + n3 * (j + n2 * (i-1))]+difxne[k + n3 * (j+1 + n2 * (i-1))]); difxyni[k + n3 * (j + n2 * (i))]=0.25*(difxni[k + n3 * (j + n2 * (i))]+difxni[k + n3 * (j+1 + n2 * (i))]+difxni[k + n3 * (j + n2 * (i-1))]+difxni[k + n3 * (j+1 + n2 * (i-1))]); // ----------------------------------------------------------------------------------------------- // Here we calculate the fluxes in y direction fey[k + n3 * (j + n2 * (i))]= (-0.5*(ne[k + n3 * (j+1 + n2 * (i))]+ne[k + n3 * (j + n2 * (i))])*mue*Ey[k + n3 * (j + n2 * (i))]-dife*difyne[k + n3 * (j + n2 * (i))] -wce*q*0.5*(ne[k + n3 * (j+1 + n2 * (i))]+ne[k + n3 * (j + n2 * (i))])*Exy[k + n3 * (j + n2 * (i))]/(me*nue*nue)-wce*dife*difxyne[k + n3 * (j + n2 * (i))]/nue)/denominator_e; fiy[k + n3 * (j + n2 * (i))]= (0.5*(ni[k + n3 * (j+1 + n2 * (i))]+ni[k + n3 * (j + n2 * (i))])*mui*Ey[k + n3 * (j + n2 * (i))]-difi*difyni[k + n3 * (j + n2 * (i))] -wci*q*0.5*(ni[k + n3 * (j+1 + n2 * (i))]+ni[k + n3 * (j + n2 * (i))])*Exy[k + n3 * (j + n2 * (i))]/(mi*nui*nui)+wci*difi*difxyni[k + n3 * (j + n2 * (i))]/nui)/denominator_i; if (fey[k + n3 * (0 + n2 * (i))] > 0.0){ fey[k + n3 * (0 + n2 * (i))] = 0.0; } if (fiy[k + n3 * (0 + n2 * (i))] > 0.0){ fiy[k + n3 * (0 + n2 * (i))] = 0.0; } if (fey[k + n3 * (jmax-2 + n2 * (i))] < 0.0){ fey[k + n3 * (jmax-2 + n2 * (i))] = 0.0; } if (fiy[k + n3 * (jmax-2 + n2 * (i))] < 0.0){ fiy[k + n3 * (jmax-2 + n2 * (i))] = 0.0; } // ----------------------------------------------------------------------------------------------- // Calculating the average Exy and difxy to be used in x direction fluxes // Calculating the average values of Ey and gradiant_y Exy[k + n3 * (j + n2 * (i))]= 0.0 ; difxyne[k + n3 * (j + n2 * (i))]=0.0; difxyni[k + n3 * (j + n2 * (i))]=0.0; Exy[k + n3 * (j + n2 * (i))]= 0.25*(Ey[k + n3 * (j + n2 * (i))]+Ey[k + n3 * (j-1 + n2 * (i))]+Ey[k + n3 * (j + n2 * (i+1))]+Ey[k + n3 * (j-1 + n2 * (i+1))]); difxyne[k + n3 * (j + n2 * (i))]= 0.25*(difyne[k + n3 * (j + n2 * (i))]+difyne[k + n3 * (j-1 + n2 * 
(i))]+difyne[k + n3 * (j + n2 * (i+1))]+difyne[k + n3 * (j-1 + n2 * (i+1))]); difxyni[k + n3 * (j + n2 * (i))]= 0.25*(difyni[k + n3 * (j + n2 * (i))]+difyni[k + n3 * (j-1 + n2 * (i))]+difyni[k + n3 * (j + n2 * (i+1))]+difyni[k + n3 * (j-1 + n2 * (i+1))]); // ----------------------------------------------------------------------------------------------- // Now ready to calculate the fluxes in x direction fex[k + n3 * (j + n2 * (i))]=(-0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k + n3 * (j + n2 * (i+1))])*mue*Ex[k + n3 * (j + n2 * (i))]-dife*difxne[k + n3 * (j + n2 * (i))] +wce*dife*difxyne[k + n3 * (j + n2 * (i))]/nue+wce*q*0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k + n3 * (j + n2 * (i+1))])/(me*nue*nue)*Exy[k + n3 * (j + n2 * (i))])/denominator_e; fix[k + n3 * (j + n2 * (i))]=(0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k + n3 * (j + n2 * (i+1))])*mui*Ex[k + n3 * (j + n2 * (i))]-difi*difxni[k + n3 * (j + n2 * (i))] -wci*difi*difxyni[k + n3 * (j + n2 * (i))]/nui+wci*q*0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k + n3 * (j + n2 * (i+1))])*Exy[k + n3 * (j + n2 * (i))]/(mi*nui*nui))/denominator_i; if (fex[k + n3 * (j + n2 * (0))] > 0.0){ fex[k + n3 * (j + n2 * (0))] = 0.0; } if (fix[k + n3 * (j + n2 * (0))] > 0.0){ fix[k + n3 * (j + n2 * (0))] = 0.0; } if (fex[k + n3 * (j + n2 * (imax-2))] < 0.0){ fex[k + n3 * (j + n2 * (imax-2))] = 0.0; } if (fix[k + n3 * (j + n2 * (imax-2))] < 0.0){ fix[k + n3 * (j + n2 * (imax-2))] = 0.0; } // ----------------------------------------------------------------------------------------------- // Now we calculate the fluxes in z direction fez[k + n3 * (j + n2 * (i))]=-0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k+1 + n3 * (j + n2 * (i))])*mue*Ez[k + n3 * (j + n2 * (i))]-dife*difzne[k + n3 * (j + n2 * (i))]; fiz[k + n3 * (j + n2 * (i))]=0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k+1 + n3 * (j + n2 * (i))])*mui*Ez[k + n3 * (j + n2 * (i))]-difi*difzni[k + n3 * (j + n2 * (i))]; // BC on fluxes if (fez[0 + n3 * (j + n2 * (i))]>0.0){ fez[0 + n3 * (j + n2 * (i))]=0.0; } if (fiz[0 + n3 * (j + n2 * (i))]>0.0){ fiz[0 + n3 * (j + n2 * (i))]=0.0; } if (fez[kmax-2 + n3 * (j + n2 * (i))]<0.0){ fez[kmax-2 + n3 * (j + n2 * (i))]=0.0; } if (fiz[kmax-2 + n3 * (j + n2 * (i))]<0.0){ fiz[kmax-2 + n3 * (j + n2 * (i))]=0.0; } // ----------------------------------------------------------------------------------------------- ne[k + n3 * (j + n2 * (i))]=ne[k + n3 * (j + n2 * (i))]-dt*(fex[k + n3 * (j + n2 * (i))]-fex[k + n3 * (j + n2 * (i-1))]+fey[k + n3 * (j + n2 * (i))]-fey[k + n3 * (j-1 + n2 * (i))]+fez[k + n3 * (j + n2 * (i))]-fez[k-1 + n3 * (j + n2 * (i))])/h ; ni[k + n3 * (j + n2 * (i))]=ni[k + n3 * (j + n2 * (i))]-dt*(fix[k + n3 * (j + n2 * (i))]-fix[k + n3 * (j + n2 * (i-1))]+fiy[k + n3 * (j + n2 * (i))]-fiy[k + n3 * (j-1 + n2 * (i))]+fiz[k + n3 * (j + n2 * (i))]-fiz[k-1 + n3 * (j + n2 * (i))])/h ; ne[0 + n3 * (j + n2 * (i))] = -dt*fez[0 + n3 * (j + n2 * (i))]/h ; ni[0 + n3 * (j + n2 * (i))] = -dt*fiz[0 + n3 * (j + n2 * (i))]/h ; ne[k + n3 * (0 + n2 * (i))] = -dt*fey[k + n3 * (0 + n2 * (i))]/h ; ni[k + n3 * (0 + n2 * (i))] = -dt*fiy[k + n3 * (0 + n2 * (i))]/h ; ne[k + n3 * (j + n2 * (0))]= -dt*fex[k + n3 * (j + n2 * (0))]/h ; ni[k + n3 * (j + n2 * (0))]= -dt*fix[k + n3 * (j + n2 * (0))]/h ; // BC on densities ne[k + n3 * (0 + n2 * (i))] = 0.0 ; ni[k + n3 * (0 + n2 * (i))] = 0.0 ; ne[k + n3 * (jmax-1 + n2 * (i))] = 0.0 ; ni[k + n3 * (jmax-1 + n2 * (i))] = 0.0 ; ne[k + n3 * (j + n2 * (0))]= 0.0 ; ni[k + n3 * (j + n2 * (0))]= 0.0 ; ne[k + n3 * (j + n2 * (imax-1))]= 0.0 ; ni[k + n3 * (j + n2 * (imax-1))]= 0.0 ; ne[kmax-1 + n3 
* (j + n2 * (i))]=0.0; ne[0 + n3 * (j + n2 * (i))]=0.0; ni[kmax-1 + n3 * (j + n2 * (i))]=0.0; ni[0 + n3 * (j + n2 * (i))]=0.0; sf_temp[0] = 0; } __global__ void sum_ne_cu(int imax, int jmax, int kmax, float* mat, float* res) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; for (int I = index_x; I < N; I += stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; atomicAdd(res, mat[I]); } } __global__ void update_ne_cu(int imax, int jmax, int kmax, float* ne, float* ni, float *sf, float si) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; float alpha=(si-sf[0])/(sf[0]); for ( int I = index_x; I < N; I += stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; ne[k + n3 * (j + n2 * (i))]=ne[k + n3 * (j + n2 * (i))]+alpha*ne[k + n3 * (j + n2 * (i))] ; ni[k + n3 * (j + n2 * (i))]=ni[k + n3 * (j + n2 * (i))]+alpha*ne[k + n3 * (j + n2 * (i))] ; } } int main() { int deviceIndex = 0; hipSetDevice(deviceIndex); printf("Using device %d\n", deviceIndex); int imax = 64, jmax = 64, kmax = 64,i,j,k; int n1 = imax+3, n2 = jmax+3, n3 = kmax+3; float qi=1.6E-19,qe=-1.6E-19, kr = 0,ki = 0,si = 0,sf = 0,alpha = 0, q=1.6E-19,pie=3.14159,Ta,w,eps0,Te,Ti,B,Kb,me,mi,nue,nui,denominator_e,denominator_i,nn,dt,h,wce,wci,mue,mui,dife,difi; int tmax = 100; float *ne; float *ni; float *ne_temp; float *ni_temp; float *difxne; float *difyne; float *difxni; float *difyni; float *difxyne; float *difxyni; float *Exy; float *fexy; float *fixy; float *g; float *g_temp; float *R; float *Ex; float *Ey; float *fex; float *fey; float *fix; float *fiy; float *V; float *L; float *difzne; float *difzni; float *Ez; float *fez; float *fiz; float *values; hipMallocManaged(&(values ), 32 * sizeof(float)); hipMallocManaged(&(ne ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(ni ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(ne_temp ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(ni_temp ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difxne ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difyne ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difxni ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difyni ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difxyne ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difxyni ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(Exy ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fexy ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fixy ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(g ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(g_temp ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(R ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(Ex ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(Ey ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fex ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fey ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fix ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fiy ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(V ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(L ), n1 * n2 * n3 * sizeof(float)); 
hipMallocManaged(&(difzne ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(difzni ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(Ez ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fez ), n1 * n2 * n3 * sizeof(float)); hipMallocManaged(&(fiz ), n1 * n2 * n3 * sizeof(float)); Kb = 1.38E-23; B = 0.5; Te = 2.5*11604.5; Ti = 0.025*11604.5; me = 9.109E-31; mi = 6.633E-26; ki = 0.0; dt = 1.0E-14; h = 4.0E-4; eps0 = 8.854E-12; si = 0.0; sf =0.0; FILE*fp1; FILE*fp2; FILE*fp3; fp1=fopen("V_0.5B_20m.txt","w"); fp2=fopen("ne_0.5B_20m.txt","w"); fp3=fopen("ni_0.5B_20m.txt","w"); for ( i=0; i<imax+3;i++){ for ( j=0; j<jmax+3; j++){ for ( k=0; k<kmax+3;k++){ ne[k + n3 * (j + n2 * (i))] = 1e-9; ni[k + n3 * (j + n2 * (i))] = 1e-9; difxne[k + n3 * (j + n2 * (i))] = 1e-9; difyne[k + n3 * (j + n2 * (i))] = 1e-9; difxni[k + n3 * (j + n2 * (i))] = 1e-9; difyni[k + n3 * (j + n2 * (i))] = 1e-9; difxyne[k + n3 * (j + n2 * (i))] = 1e-9; difxyni[k + n3 * (j + n2 * (i))] = 1e-9; Exy[k + n3 * (j + n2 * (i))] = 1e-9; fexy[k + n3 * (j + n2 * (i))] = 1e-9; fixy[k + n3 * (j + n2 * (i))] = 1e-9; g[k + n3 * (j + n2 * (i))] = 1e-9; R[k + n3 * (j + n2 * (i))] = 1e-9; Ex[k + n3 * (j + n2 * (i))] = 1e-9; Ey[k + n3 * (j + n2 * (i))] = 1e-9; fex[k + n3 * (j + n2 * (i))] = 1e-9; fey[k + n3 * (j + n2 * (i))] = 1e-9; fix[k + n3 * (j + n2 * (i))] = 1e-9; fiy[k + n3 * (j + n2 * (i))] = 1e-9; V[k + n3 * (j + n2 * (i))] = 1e-9; L[k + n3 * (j + n2 * (i))] = 1e-9; difzne[k + n3 * (j + n2 * (i))] = 1e-9; difzni[k + n3 * (j + n2 * (i))] = 1e-9; Ez[k + n3 * (j + n2 * (i))] = 1e-9; fez[k + n3 * (j + n2 * (i))] = 1e-9; fiz[k + n3 * (j + n2 * (i))] = 1e-9; } } } nn=10.0/(Kb*Ti); //neutral density=p/(Kb.T) nue=nn*1.1E-19*sqrt(2.55*Kb*Te/me); // electron collision frequency= neutral density * sigma_e*Vth_e nui=nn*4.4E-19*sqrt(2.55*Kb*Ti/mi); wce=q*B/me; wci=q*B/mi; mue=q/(me*nue); mui=q/(mi*nui); dife=Kb*Te/(me*nue); difi=Kb*Ti/(mi*nui); ki=0.00002/(nn*dt); denominator_e= (1+wce*wce/(nue*nue)); denominator_i= (1+wci*wci/(nui*nui)); // Ta and W are just some constants needed for the iterative method that we have used to solve Poisson eq. 
Ta=acos((cos(pie/imax)+cos(pie/jmax)+cos(pie/kmax))/3.0);// needs to be float checked w=2.0/(1.0+sin(Ta)); // ----------------------------------------------------------------------------------------------- //Density initialization // To add multiple Gaussian sources, just simply use the density_initialization function at the (x,y) points that you want int x_position = 15, y_position = 15, z_position = 15; for ( i=1; i<imax-1;i++){ for ( j=1; j<jmax-1;j++){ for ( k=1; k<kmax-1;k++){ ne[k + n3 * (j + n2 * (i))]= 5.0E14;/* 1.0E14+1.0E14*exp(-(pow((i-x_position),2)+ pow((j-y_position),2)+pow((k-z_position),2))/100.0);*/ ni[k + n3 * (j + n2 * (i))]=5.0E14;/* 1.0E14+1.0E14*exp(-(pow((i-x_position),2)+ pow((j-y_position),2)+pow((k-z_position),2))/100.0);*/ } } } for ( i=18; i<22;i++){ for ( j=18; j<22;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=38; i<42;i++){ for ( j=18; j<22;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=18; i<22;i++){ for ( j=38; j<42;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=38; i<42;i++){ for ( j=38; j<42;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( k=1; k<kmax+1; k++) { for ( j=1; j<jmax+1; j++) { for ( i=1; i<imax+1;i++) { si=si+ne[k + n3 * (j + n2 * (i))] ; } } } int myTime,kk,I,N,s1; N=n1*n2*n3; int iterations = 40; float* sf_temp; hipMallocManaged(&sf_temp, sizeof(sf_temp)); for (i=0; i<31; i++) { values[0] = qi; values[1] = qe; values[2] = kr; values[3] = ki; values[4] = si; values[5] = sf; values[6] = alpha; values[7] = q; values[8] = pie; values[9] = Ta; values[10] = w; values[11] = eps0; values[12] = Te; values[13] = Ti; values[14] = B; values[15] = Kb; values[16] = me; values[17] = mi; values[18] = nue; values[19] = nui; values[20] = denominator_e; values[21] = denominator_i; values[22] = nn; values[23] = dt; values[24] = h; values[25] = wce; values[26] = wci; values[27] = mue; values[28] = mui; values[29] = dife; values[30] = difi; } double begin = clock(); for ( myTime=1; myTime<tmax; myTime++){ // This for loop takes care of myTime evolution //if (myTime % 1000 == 0) // printf("%d\n", myTime); hipLaunchKernelGGL(( before_poisson_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, ne, ni, g, g_temp, values); for (kk=0; kk<iterations; kk++) { hipLaunchKernelGGL(( poisson_solve_1it_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, n1, n2, n3, N, V, g, R, w, h, 1); hipLaunchKernelGGL(( poisson_solve_1it_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, n1, n2, n3, N, V, g, R, w, h, 0); } hipLaunchKernelGGL(( after_poisson_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, ne, ni , difxne, difyne, difxni , difyni, difxyne, difxyni, Exy, fexy, fixy, R, Ex, Ey , fex, fey, fix, fiy, V, difzne, difzni, Ez, fez, fiz , values, sf_temp); hipLaunchKernelGGL(( sum_ne_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, ne, sf_temp); hipLaunchKernelGGL(( update_ne_cu), dim3(512), dim3(N/512), 0, 0, imax, jmax, kmax, ne, ni, sf_temp, si); //printf("%d\n", myTime); } double time_spent1 = (clock() - begin) / CLOCKS_PER_SEC; printf("Time spent without parallelization: %f\n", time_spent1); hipDeviceSynchronize(); printf("%f\n", V[5 + (kmax+3) * (5 + (jmax+3) * 5)]); printf("%f\n", g[5 + (kmax+3) * (5 + (jmax+3) * 5)]); //for ( i=0; i<imax+1;i++){ // for ( j=0; j<jmax+1; j++){ // fprintf(fp1,"%d %d 
%f \n", i,j,V[31 + n3 * (j + n2 * (i))]); // fprintf(fp2,"%d %d %f \n", i,j,ne[31 + n3 * (j + n2 * (i))]); // fprintf(fp3,"%d %d %f \n", i,j,ni[31 + n3 * (j + n2 * (i))]); // } // } hipDeviceReset() ; }
d968eac8040eff4c5670de5d06a2ae8e1abbcb0e.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void mardas(int imax, int jmax, int kmax, int n1, int n2, int n3, int N, int iterations, float* V, float* g, float *R, float w, float h, int oddEven) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; for (int I = index_x; I < N; I +=stride_x) { V[I] = 1000; } } __global__ void poisson_solve_1it_cu(int imax, int jmax, int kmax, int n1, int n2, int n3, int N, float* V, float* g, float *R, float w, float h, int oddEven) { int I = threadIdx.x + blockDim.x * blockIdx.x; //int stride_x = blockDim.x * gridDim.x; //for (int I = index_x; I < N; I +=stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) return; if ((i+j+k)%2==oddEven) return; R[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i+1))]+ V[k + n3 * (j + n2 * (i-1))]+ V[k + n3 * (j+1 + n2 * (i))]+ V[k + n3 * (j-1 + n2 * (i))]+ V[k+1 + n3 * (j + n2 * (i))]+ V[k-1 + n3 * (j + n2 * (i))] ) / 6.0 - V[k + n3 * (j + n2 * (i))]- (h*h)*g[k + n3 * (j + n2 * (i))]/6.0; V[k + n3 * (j + n2 * (i))] += w*R[k + n3 * (j + n2 * (i))]; //} } __global__ void before_poisson_cu(int imax, int jmax, int kmax, float* ne,float* ni, float *g, float* g_temp, float *values) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; float qi = values[0]; float qe = values[1]; float w = values[10]; float eps0 = values[11]; float h = values[24]; for (int I=index_x; I<N; I+=stride_x) g_temp[I] = w*h*h*g[I]/6.; for (int I=index_x; I<N; I+=stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i >= 1 && i < imax-1 && j >= 1 && j < jmax-1 && k >= 1 && k < kmax-1) { g_temp[I] += w/6.*(g_temp[I-1]+g_temp[I-n3]+g_temp[I-n3*n2]); } else { g_temp[I] = 0; } } for ( I = index_x; I < N; I += stride_x) { k = I % n3; s1 = (I - k) / n3; j = s1 % n2; i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; g[k + n3 * (j + n2 * (i))]=-(ne[k + n3 * (j + n2 * (i))]*qe+ni[k + n3 * (j + n2 * (i))]*qi)/eps0; } } __global__ void after_poisson_cu(int imax, int jmax, int kmax, float *ne, float *ni , float *difxne, float *difyne, float *difxni , float *difyni, float *difxyne, float *difxyni, float *Exy, float *fexy , float *fixy, float *R, float *Ex, float *Ey , float *fex, float *fey, float *fix, float *fiy, float *V, float *difzne, float *difzni, float *Ez, float *fez, float *fiz , float *values, float *sf_temp) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int I = index_x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,N,s1; N=n1*n2*n3; float q = values[7]; float me = values[16]; float mi = values[17]; float nue = values[18]; float nui = values[19]; float denominator_e = values[20]; float denominator_i = values[21]; float dt = values[23]; float h = values[24]; float wce = values[25]; float wci = values[26]; float mue = values[27]; float mui = values[28]; float dife = values[29]; float difi = values[30]; k = I % n3; s1 = (I - k) / n3; j = s1 % n2; i = (s1 - j) / n2; if (i >= imax-1 || j >= jmax || k >= kmax) return; Ex[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k + n3 * (j + n2 * (i+1))])/h; difxne[k + n3 * (j + n2 * (i))]=(ne[k + n3 * (j + n2 * (i+1))]-ne[k + n3 * (j + n2 
* (i))])/h; difxni[k + n3 * (j + n2 * (i))]=(ni[k + n3 * (j + n2 * (i+1))]-ni[k + n3 * (j + n2 * (i))])/h; Ey[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k + n3 * (j+1 + n2 * (i))])/h; difyne[k + n3 * (j + n2 * (i))]=(ne[k + n3 * (j+1 + n2 * (i))]-ne[k + n3 * (j + n2 * (i))])/h; difyni[k + n3 * (j + n2 * (i))]=(ni[k + n3 * (j+1 + n2 * (i))]-ni[k + n3 * (j + n2 * (i))])/h; Ez[k + n3 * (j + n2 * (i))]= (V[k + n3 * (j + n2 * (i))]-V[k+1 + n3 * (j + n2 * (i))])/h; difzne[k + n3 * (j + n2 * (i))]=(ne[k+1 + n3 * (j + n2 * (i))]-ne[k + n3 * (j + n2 * (i))])/h; difzni[k + n3 * (j + n2 * (i))]=(ni[k+1 + n3 * (j + n2 * (i))]-ni[k + n3 * (j + n2 * (i))])/h; // ----------------------------------------------------------------------------------------------- /* Since I am using mid points for Calculating electric field and density gradient, to calculate Ex at any point that I don't have it directly, the average over the neighboring points is used instead. these average variables are, exy, fexy, fixy, ...*/ // Calculating the average values of Ex and gradiant_x Exy[k + n3 * (j + n2 * (i))]= 0.0 ; difxyne[k + n3 * (j + n2 * (i))]=0.0; difxyni[k + n3 * (j + n2 * (i))]=0.0; Exy[k + n3 * (j + n2 * (i))]= 0.25*(Ex[k + n3 * (j + n2 * (i))]+Ex[k + n3 * (j+1 + n2 * (i))]+Ex[k + n3 * (j + n2 * (i-1))]+Ex[k + n3 * (j+1 + n2 * (i-1))]) ; difxyne[k + n3 * (j + n2 * (i))]=0.25*(difxne[k + n3 * (j + n2 * (i))]+difxne[k + n3 * (j+1 + n2 * (i))]+difxne[k + n3 * (j + n2 * (i-1))]+difxne[k + n3 * (j+1 + n2 * (i-1))]); difxyni[k + n3 * (j + n2 * (i))]=0.25*(difxni[k + n3 * (j + n2 * (i))]+difxni[k + n3 * (j+1 + n2 * (i))]+difxni[k + n3 * (j + n2 * (i-1))]+difxni[k + n3 * (j+1 + n2 * (i-1))]); // ----------------------------------------------------------------------------------------------- // Here we calculate the fluxes in y direction fey[k + n3 * (j + n2 * (i))]= (-0.5*(ne[k + n3 * (j+1 + n2 * (i))]+ne[k + n3 * (j + n2 * (i))])*mue*Ey[k + n3 * (j + n2 * (i))]-dife*difyne[k + n3 * (j + n2 * (i))] -wce*q*0.5*(ne[k + n3 * (j+1 + n2 * (i))]+ne[k + n3 * (j + n2 * (i))])*Exy[k + n3 * (j + n2 * (i))]/(me*nue*nue)-wce*dife*difxyne[k + n3 * (j + n2 * (i))]/nue)/denominator_e; fiy[k + n3 * (j + n2 * (i))]= (0.5*(ni[k + n3 * (j+1 + n2 * (i))]+ni[k + n3 * (j + n2 * (i))])*mui*Ey[k + n3 * (j + n2 * (i))]-difi*difyni[k + n3 * (j + n2 * (i))] -wci*q*0.5*(ni[k + n3 * (j+1 + n2 * (i))]+ni[k + n3 * (j + n2 * (i))])*Exy[k + n3 * (j + n2 * (i))]/(mi*nui*nui)+wci*difi*difxyni[k + n3 * (j + n2 * (i))]/nui)/denominator_i; if (fey[k + n3 * (0 + n2 * (i))] > 0.0){ fey[k + n3 * (0 + n2 * (i))] = 0.0; } if (fiy[k + n3 * (0 + n2 * (i))] > 0.0){ fiy[k + n3 * (0 + n2 * (i))] = 0.0; } if (fey[k + n3 * (jmax-2 + n2 * (i))] < 0.0){ fey[k + n3 * (jmax-2 + n2 * (i))] = 0.0; } if (fiy[k + n3 * (jmax-2 + n2 * (i))] < 0.0){ fiy[k + n3 * (jmax-2 + n2 * (i))] = 0.0; } // ----------------------------------------------------------------------------------------------- // Calculating the average Exy and difxy to be used in x direction fluxes // Calculating the average values of Ey and gradiant_y Exy[k + n3 * (j + n2 * (i))]= 0.0 ; difxyne[k + n3 * (j + n2 * (i))]=0.0; difxyni[k + n3 * (j + n2 * (i))]=0.0; Exy[k + n3 * (j + n2 * (i))]= 0.25*(Ey[k + n3 * (j + n2 * (i))]+Ey[k + n3 * (j-1 + n2 * (i))]+Ey[k + n3 * (j + n2 * (i+1))]+Ey[k + n3 * (j-1 + n2 * (i+1))]); difxyne[k + n3 * (j + n2 * (i))]= 0.25*(difyne[k + n3 * (j + n2 * (i))]+difyne[k + n3 * (j-1 + n2 * (i))]+difyne[k + n3 * (j + n2 * (i+1))]+difyne[k + n3 * (j-1 + n2 * (i+1))]); difxyni[k + n3 * 
(j + n2 * (i))]= 0.25*(difyni[k + n3 * (j + n2 * (i))]+difyni[k + n3 * (j-1 + n2 * (i))]+difyni[k + n3 * (j + n2 * (i+1))]+difyni[k + n3 * (j-1 + n2 * (i+1))]); // ----------------------------------------------------------------------------------------------- // Now ready to calculate the fluxes in x direction fex[k + n3 * (j + n2 * (i))]=(-0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k + n3 * (j + n2 * (i+1))])*mue*Ex[k + n3 * (j + n2 * (i))]-dife*difxne[k + n3 * (j + n2 * (i))] +wce*dife*difxyne[k + n3 * (j + n2 * (i))]/nue+wce*q*0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k + n3 * (j + n2 * (i+1))])/(me*nue*nue)*Exy[k + n3 * (j + n2 * (i))])/denominator_e; fix[k + n3 * (j + n2 * (i))]=(0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k + n3 * (j + n2 * (i+1))])*mui*Ex[k + n3 * (j + n2 * (i))]-difi*difxni[k + n3 * (j + n2 * (i))] -wci*difi*difxyni[k + n3 * (j + n2 * (i))]/nui+wci*q*0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k + n3 * (j + n2 * (i+1))])*Exy[k + n3 * (j + n2 * (i))]/(mi*nui*nui))/denominator_i; if (fex[k + n3 * (j + n2 * (0))] > 0.0){ fex[k + n3 * (j + n2 * (0))] = 0.0; } if (fix[k + n3 * (j + n2 * (0))] > 0.0){ fix[k + n3 * (j + n2 * (0))] = 0.0; } if (fex[k + n3 * (j + n2 * (imax-2))] < 0.0){ fex[k + n3 * (j + n2 * (imax-2))] = 0.0; } if (fix[k + n3 * (j + n2 * (imax-2))] < 0.0){ fix[k + n3 * (j + n2 * (imax-2))] = 0.0; } // ----------------------------------------------------------------------------------------------- // Now we calculate the fluxes in z direction fez[k + n3 * (j + n2 * (i))]=-0.5*(ne[k + n3 * (j + n2 * (i))]+ne[k+1 + n3 * (j + n2 * (i))])*mue*Ez[k + n3 * (j + n2 * (i))]-dife*difzne[k + n3 * (j + n2 * (i))]; fiz[k + n3 * (j + n2 * (i))]=0.5*(ni[k + n3 * (j + n2 * (i))]+ni[k+1 + n3 * (j + n2 * (i))])*mui*Ez[k + n3 * (j + n2 * (i))]-difi*difzni[k + n3 * (j + n2 * (i))]; // BC on fluxes if (fez[0 + n3 * (j + n2 * (i))]>0.0){ fez[0 + n3 * (j + n2 * (i))]=0.0; } if (fiz[0 + n3 * (j + n2 * (i))]>0.0){ fiz[0 + n3 * (j + n2 * (i))]=0.0; } if (fez[kmax-2 + n3 * (j + n2 * (i))]<0.0){ fez[kmax-2 + n3 * (j + n2 * (i))]=0.0; } if (fiz[kmax-2 + n3 * (j + n2 * (i))]<0.0){ fiz[kmax-2 + n3 * (j + n2 * (i))]=0.0; } // ----------------------------------------------------------------------------------------------- ne[k + n3 * (j + n2 * (i))]=ne[k + n3 * (j + n2 * (i))]-dt*(fex[k + n3 * (j + n2 * (i))]-fex[k + n3 * (j + n2 * (i-1))]+fey[k + n3 * (j + n2 * (i))]-fey[k + n3 * (j-1 + n2 * (i))]+fez[k + n3 * (j + n2 * (i))]-fez[k-1 + n3 * (j + n2 * (i))])/h ; ni[k + n3 * (j + n2 * (i))]=ni[k + n3 * (j + n2 * (i))]-dt*(fix[k + n3 * (j + n2 * (i))]-fix[k + n3 * (j + n2 * (i-1))]+fiy[k + n3 * (j + n2 * (i))]-fiy[k + n3 * (j-1 + n2 * (i))]+fiz[k + n3 * (j + n2 * (i))]-fiz[k-1 + n3 * (j + n2 * (i))])/h ; ne[0 + n3 * (j + n2 * (i))] = -dt*fez[0 + n3 * (j + n2 * (i))]/h ; ni[0 + n3 * (j + n2 * (i))] = -dt*fiz[0 + n3 * (j + n2 * (i))]/h ; ne[k + n3 * (0 + n2 * (i))] = -dt*fey[k + n3 * (0 + n2 * (i))]/h ; ni[k + n3 * (0 + n2 * (i))] = -dt*fiy[k + n3 * (0 + n2 * (i))]/h ; ne[k + n3 * (j + n2 * (0))]= -dt*fex[k + n3 * (j + n2 * (0))]/h ; ni[k + n3 * (j + n2 * (0))]= -dt*fix[k + n3 * (j + n2 * (0))]/h ; // BC on densities ne[k + n3 * (0 + n2 * (i))] = 0.0 ; ni[k + n3 * (0 + n2 * (i))] = 0.0 ; ne[k + n3 * (jmax-1 + n2 * (i))] = 0.0 ; ni[k + n3 * (jmax-1 + n2 * (i))] = 0.0 ; ne[k + n3 * (j + n2 * (0))]= 0.0 ; ni[k + n3 * (j + n2 * (0))]= 0.0 ; ne[k + n3 * (j + n2 * (imax-1))]= 0.0 ; ni[k + n3 * (j + n2 * (imax-1))]= 0.0 ; ne[kmax-1 + n3 * (j + n2 * (i))]=0.0; ne[0 + n3 * (j + n2 * (i))]=0.0; ni[kmax-1 + n3 * (j + n2 * (i))]=0.0; 
ni[0 + n3 * (j + n2 * (i))]=0.0; sf_temp[0] = 0; } __global__ void sum_ne_cu(int imax, int jmax, int kmax, float* mat, float* res) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; for (int I = index_x; I < N; I += stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; atomicAdd(res, mat[I]); } } __global__ void update_ne_cu(int imax, int jmax, int kmax, float* ne, float* ni, float *sf, float si) { int index_x = threadIdx.x + blockDim.x * blockIdx.x; int stride_x = blockDim.x * gridDim.x; int n1=imax+3, n2 = jmax+3, n3 = kmax+3,i,j,k,myTime,kk,I,N,s1; N=n1*n2*n3; float alpha=(si-sf[0])/(sf[0]); for ( int I = index_x; I < N; I += stride_x) { int k = I % n3; int s1 = (I - k) / n3; int j = s1 % n2; int i = (s1 - j) / n2; if (i * j * k == 0 || i >= imax-1 || j >= jmax-1 || k >= kmax-1) continue; ne[k + n3 * (j + n2 * (i))]=ne[k + n3 * (j + n2 * (i))]+alpha*ne[k + n3 * (j + n2 * (i))] ; ni[k + n3 * (j + n2 * (i))]=ni[k + n3 * (j + n2 * (i))]+alpha*ne[k + n3 * (j + n2 * (i))] ; } } int main() { int deviceIndex = 0; cudaSetDevice(deviceIndex); printf("Using device %d\n", deviceIndex); int imax = 64, jmax = 64, kmax = 64,i,j,k; int n1 = imax+3, n2 = jmax+3, n3 = kmax+3; float qi=1.6E-19,qe=-1.6E-19, kr = 0,ki = 0,si = 0,sf = 0,alpha = 0, q=1.6E-19,pie=3.14159,Ta,w,eps0,Te,Ti,B,Kb,me,mi,nue,nui,denominator_e,denominator_i,nn,dt,h,wce,wci,mue,mui,dife,difi; int tmax = 100; float *ne; float *ni; float *ne_temp; float *ni_temp; float *difxne; float *difyne; float *difxni; float *difyni; float *difxyne; float *difxyni; float *Exy; float *fexy; float *fixy; float *g; float *g_temp; float *R; float *Ex; float *Ey; float *fex; float *fey; float *fix; float *fiy; float *V; float *L; float *difzne; float *difzni; float *Ez; float *fez; float *fiz; float *values; cudaMallocManaged(&(values ), 32 * sizeof(float)); cudaMallocManaged(&(ne ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(ni ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(ne_temp ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(ni_temp ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difxne ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difyne ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difxni ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difyni ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difxyne ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difxyni ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(Exy ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fexy ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fixy ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(g ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(g_temp ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(R ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(Ex ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(Ey ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fex ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fey ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fix ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fiy ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(V ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(L ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(difzne ), n1 * n2 * n3 * sizeof(float)); 
cudaMallocManaged(&(difzni ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(Ez ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fez ), n1 * n2 * n3 * sizeof(float)); cudaMallocManaged(&(fiz ), n1 * n2 * n3 * sizeof(float)); Kb = 1.38E-23; B = 0.5; Te = 2.5*11604.5; Ti = 0.025*11604.5; me = 9.109E-31; mi = 6.633E-26; ki = 0.0; dt = 1.0E-14; h = 4.0E-4; eps0 = 8.854E-12; si = 0.0; sf =0.0; FILE*fp1; FILE*fp2; FILE*fp3; fp1=fopen("V_0.5B_20m.txt","w"); fp2=fopen("ne_0.5B_20m.txt","w"); fp3=fopen("ni_0.5B_20m.txt","w"); for ( i=0; i<imax+3;i++){ for ( j=0; j<jmax+3; j++){ for ( k=0; k<kmax+3;k++){ ne[k + n3 * (j + n2 * (i))] = 1e-9; ni[k + n3 * (j + n2 * (i))] = 1e-9; difxne[k + n3 * (j + n2 * (i))] = 1e-9; difyne[k + n3 * (j + n2 * (i))] = 1e-9; difxni[k + n3 * (j + n2 * (i))] = 1e-9; difyni[k + n3 * (j + n2 * (i))] = 1e-9; difxyne[k + n3 * (j + n2 * (i))] = 1e-9; difxyni[k + n3 * (j + n2 * (i))] = 1e-9; Exy[k + n3 * (j + n2 * (i))] = 1e-9; fexy[k + n3 * (j + n2 * (i))] = 1e-9; fixy[k + n3 * (j + n2 * (i))] = 1e-9; g[k + n3 * (j + n2 * (i))] = 1e-9; R[k + n3 * (j + n2 * (i))] = 1e-9; Ex[k + n3 * (j + n2 * (i))] = 1e-9; Ey[k + n3 * (j + n2 * (i))] = 1e-9; fex[k + n3 * (j + n2 * (i))] = 1e-9; fey[k + n3 * (j + n2 * (i))] = 1e-9; fix[k + n3 * (j + n2 * (i))] = 1e-9; fiy[k + n3 * (j + n2 * (i))] = 1e-9; V[k + n3 * (j + n2 * (i))] = 1e-9; L[k + n3 * (j + n2 * (i))] = 1e-9; difzne[k + n3 * (j + n2 * (i))] = 1e-9; difzni[k + n3 * (j + n2 * (i))] = 1e-9; Ez[k + n3 * (j + n2 * (i))] = 1e-9; fez[k + n3 * (j + n2 * (i))] = 1e-9; fiz[k + n3 * (j + n2 * (i))] = 1e-9; } } } nn=10.0/(Kb*Ti); //neutral density=p/(Kb.T) nue=nn*1.1E-19*sqrt(2.55*Kb*Te/me); // electron collision frequency= neutral density * sigma_e*Vth_e nui=nn*4.4E-19*sqrt(2.55*Kb*Ti/mi); wce=q*B/me; wci=q*B/mi; mue=q/(me*nue); mui=q/(mi*nui); dife=Kb*Te/(me*nue); difi=Kb*Ti/(mi*nui); ki=0.00002/(nn*dt); denominator_e= (1+wce*wce/(nue*nue)); denominator_i= (1+wci*wci/(nui*nui)); // Ta and W are just some constants needed for the iterative method that we have used to solve Poisson eq. 
Ta=acos((cos(pie/imax)+cos(pie/jmax)+cos(pie/kmax))/3.0);// needs to be float checked w=2.0/(1.0+sin(Ta)); // ----------------------------------------------------------------------------------------------- //Density initialization // To add multiple Gaussian sources, just simply use the density_initialization function at the (x,y) points that you want int x_position = 15, y_position = 15, z_position = 15; for ( i=1; i<imax-1;i++){ for ( j=1; j<jmax-1;j++){ for ( k=1; k<kmax-1;k++){ ne[k + n3 * (j + n2 * (i))]= 5.0E14;/* 1.0E14+1.0E14*exp(-(pow((i-x_position),2)+ pow((j-y_position),2)+pow((k-z_position),2))/100.0);*/ ni[k + n3 * (j + n2 * (i))]=5.0E14;/* 1.0E14+1.0E14*exp(-(pow((i-x_position),2)+ pow((j-y_position),2)+pow((k-z_position),2))/100.0);*/ } } } for ( i=18; i<22;i++){ for ( j=18; j<22;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=38; i<42;i++){ for ( j=18; j<22;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=18; i<22;i++){ for ( j=38; j<42;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( i=38; i<42;i++){ for ( j=38; j<42;j++){ for ( k=20; k<40;k++){ ne[k + n3 * (j + n2 * (i))]=5.0E15; ni[k + n3 * (j + n2 * (i))]=5.0E15; } } } for ( k=1; k<kmax+1; k++) { for ( j=1; j<jmax+1; j++) { for ( i=1; i<imax+1;i++) { si=si+ne[k + n3 * (j + n2 * (i))] ; } } } int myTime,kk,I,N,s1; N=n1*n2*n3; int iterations = 40; float* sf_temp; cudaMallocManaged(&sf_temp, sizeof(sf_temp)); for (i=0; i<31; i++) { values[0] = qi; values[1] = qe; values[2] = kr; values[3] = ki; values[4] = si; values[5] = sf; values[6] = alpha; values[7] = q; values[8] = pie; values[9] = Ta; values[10] = w; values[11] = eps0; values[12] = Te; values[13] = Ti; values[14] = B; values[15] = Kb; values[16] = me; values[17] = mi; values[18] = nue; values[19] = nui; values[20] = denominator_e; values[21] = denominator_i; values[22] = nn; values[23] = dt; values[24] = h; values[25] = wce; values[26] = wci; values[27] = mue; values[28] = mui; values[29] = dife; values[30] = difi; } double begin = clock(); for ( myTime=1; myTime<tmax; myTime++){ // This for loop takes care of myTime evolution //if (myTime % 1000 == 0) // printf("%d\n", myTime); before_poisson_cu<<<512, N/512>>>(imax, jmax, kmax, ne, ni, g, g_temp, values); for (kk=0; kk<iterations; kk++) { poisson_solve_1it_cu<<<512, N/512>>>(imax, jmax, kmax, n1, n2, n3, N, V, g, R, w, h, 1); poisson_solve_1it_cu<<<512, N/512>>>(imax, jmax, kmax, n1, n2, n3, N, V, g, R, w, h, 0); } after_poisson_cu<<<512, N/512>>>( imax, jmax, kmax, ne, ni , difxne, difyne, difxni , difyni, difxyne, difxyni, Exy, fexy, fixy, R, Ex, Ey , fex, fey, fix, fiy, V, difzne, difzni, Ez, fez, fiz , values, sf_temp); sum_ne_cu<<<512, N/512>>>(imax, jmax, kmax, ne, sf_temp); update_ne_cu<<<512, N/512>>>(imax, jmax, kmax, ne, ni, sf_temp, si); //printf("%d\n", myTime); } double time_spent1 = (clock() - begin) / CLOCKS_PER_SEC; printf("Time spent without parallelization: %f\n", time_spent1); cudaDeviceSynchronize(); printf("%f\n", V[5 + (kmax+3) * (5 + (jmax+3) * 5)]); printf("%f\n", g[5 + (kmax+3) * (5 + (jmax+3) * 5)]); //for ( i=0; i<imax+1;i++){ // for ( j=0; j<jmax+1; j++){ // fprintf(fp1,"%d %d %f \n", i,j,V[31 + n3 * (j + n2 * (i))]); // fprintf(fp2,"%d %d %f \n", i,j,ne[31 + n3 * (j + n2 * (i))]); // fprintf(fp3,"%d %d %f \n", i,j,ni[31 + n3 * (j + n2 * (i))]); // } // } cudaDeviceReset() ; }
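// Both listings above launch every kernel as <<<512, N/512>>>, i.e. 512 blocks of N/512 threads.
// With N = n1*n2*n3 = 67*67*67 = 300763 that is 512*587 = 300544 threads, 219 fewer than N; the
// shortfall happens to be harmless here because the uncovered indices fall in the outer halo that
// the kernels' bounds checks skip anyway (and 587 <= 1024 keeps the block size legal). The more
// general idiom, shown purely as a sketch, rounds the block count up so every index is covered
// and lets the in-kernel guard discard the excess threads:
int threadsPerBlock = 512;
int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;   // ceil(N / 512)
poisson_solve_1it_cu<<<numBlocks, threadsPerBlock>>>(imax, jmax, kmax, n1, n2, n3, N, V, g, R, w, h, 1);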
3df36534ffbf85061d62c368a75c54b29e1b4b97.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cudaParticleMD.hh" #include "kernelfuncs.h" #include <assert.h> #include "kerneltemplate.hh" cudaParticleMD::~cudaParticleMD() { if (m!=NULL) hipFree(m); if (hdl!=NULL) hipblasDestroy(hdl); } void cudaParticleMD::setup(int n) { cudaParticleVV::setup(n); // alloc m 1/(1/mass) hipMalloc((void **)&m, sizeof(real)*N); if (withInfo) ErrorInfo("malloc m[] on GPU"); // preparation for CUBLAS if (hdl==NULL) hipblasCreate(&hdl); } void cudaParticleMD::calcForce(void) { hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, N); class potentialLJ LJ; class MDpairForce<potentialLJ> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID_s; P.op = LJ; #if defined(CUDACUTOFFBLOCK) assert(myBlockNum<maxGrid); if (useNlist) { hipLaunchKernelGGL(( calcF_IJpairWithList_F4), dim3(MPnum), dim3(THnum1DX), 0, 0, P, NList.rowPtr, NList.colIdx, r, F, N, NULL, 0, 0); } else { hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, tmp81N, N*81); dim3 _mpnum, _thnum; _mpnum.x = myBlockNum ; _mpnum.y = 27; _mpnum.z = 1; _thnum.x = THnum2D2; _thnum.y = 1; _thnum.z = 1; assert(_thnum.x*_thnum.y <= threadsMax); hipLaunchKernelGGL(( calcF_IJpairWithBlock_F4<class MDpairForce<potentialLJ> >), dim3(_mpnum), dim3(_thnum), 0, 0, P, r_s, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N, NULL,NULL, true); hipLaunchKernelGGL(( reduce27_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, tmp81N, N); } if (withInfo) ErrorInfo("cudaParticleMD::calcForce With Block"); #else hipLaunchKernelGGL(( calcF_IJpair<class MDpairForce<potentialLJ> >), dim3(MPnum), dim3(THnum1D), 0, 0, P, r, F, N); //calcF_IJpair<class potentialLJ><<<_mpnum, _thnum>>>(LJ, r, F, typeID, N, cell[6], cell[7], cell[8], rmax2); //calcF_LJ<<<_mpnum, _thnum>>>(r, F, typeID, N, cx*cx, cy*cy, cz*cz); if (withInfo) ErrorInfo("cudaParticleMD::calcForce Without Block"); #endif } real cudaParticleMD::calcPotentialE(void) { hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, tmp3N, N); class potentialLJ LJ; class MDpairPotential<potentialLJ> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID_s; P.op = LJ; real f = 0; #if defined(CUDACUTOFFBLOCK) assert(myBlockNum<maxGrid); if (useNlist) { hipLaunchKernelGGL(( calcF_IJpairWithList_F4), dim3(MPnum), dim3(THnum1DX), 0, 0, P, NList.rowPtr, NList.colIdx, r, tmp3N, N, NULL, 0, 0); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE calc"); hipLaunchKernelGGL(( accumulate), dim3(1), dim3(threadsMax), sizeof(double)*threadsMax, 0, tmp3N, N, tmp3N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE accumulate"); hipMemcpy(&f, tmp3N, sizeof(float), hipMemcpyDeviceToHost); } else { hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, tmp81N, N*81); dim3 _mpnum, _thnum; _mpnum.x = myBlockNum ; _mpnum.y = 27; _mpnum.z = 1; _thnum.x = THnum2D2; _thnum.y = 1; _thnum.z = 1; assert(_thnum.x*_thnum.y <= threadsMax); hipLaunchKernelGGL(( calcF_IJpairWithBlock_F4<class MDpairPotential<potentialLJ> >), dim3(_mpnum), dim3(_thnum), 0, 0, P, r_s, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N, NULL,NULL, true); hipLaunchKernelGGL(( reduce27_F4), dim3(MPnum), dim3(THnum1D), 0, 0, tmp3N, tmp81N, N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE 
calc"); hipLaunchKernelGGL(( accumulate), dim3(1), dim3(threadsMax), sizeof(double)*threadsMax, 0, tmp3N, N, tmp3N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE accumulate"); hipMemcpy(&f, tmp3N, sizeof(float), hipMemcpyDeviceToHost); } #else std::cerr << "cudaParticleMD::calcPotentialE() is not implemented" << std::endl; #endif return f; } real cudaParticleMD::calcMV2(void) { hipLaunchKernelGGL(( calcV2_F4), dim3(MPnum), dim3(THnum1D), 0, 0, v, reinterpret_cast<float*>(tmp3N), N); if (withInfo) ErrorInfo("cudaParticleMD::calcV2"); real t=0; DOT(hdl, N, reinterpret_cast<float*>(tmp3N), 1, m, 1, &t); if (withInfo) ErrorInfo("cudaParticleMD::DOT"); return t; } void cudaParticleMD::setM(void) { hipLaunchKernelGGL(( calcReciproc), dim3(MPnum), dim3(THnum1D), 0, 0, minv, m, N); if (withInfo) ErrorInfo("calc reciprocal of minv to m"); } real cudaParticleMD::constTemp(void) { // calc kernel calculation first (before the CUBLAS funcs) real t = calcMV2(); //std::cerr << "constTemp T= " << t << std::endl; real lambda=0; DOT(hdl, N*4, (float*)F, 1, (float*)v, 1, &lambda); lambda /= t; // \sum F_i v_i / \sum m_i v_i^2 std::cerr << "[l=" << lambda << "]"; if (lambda != 0) hipLaunchKernelGGL(( correctConstTemp_F4), dim3(MPnum), dim3(THnum1D), 0, 0, v, F, m, lambda, N); return t/(3*N*kB); } void cudaParticleMD::statMV2(std::ostream &o) { calcMV2(); pthread_mutex_lock(&(mutTMP)); hipMemcpy(&(TMP[0]), tmp3N, sizeof(float)*N, hipMemcpyDeviceToHost); o << std::endl << std::endl; for (int i=0;i<N;++i) o << i << "\t" << TMP[i] << std::endl; pthread_mutex_unlock(&(mutTMP)); } real cudaParticleMD::scaleTemp(real Temp) { const real T0 = calcTemp(); const real s = sqrt(static_cast<real>(3*N-1)/static_cast<real>(3*N)*Temp/T0); hipLaunchKernelGGL(( mulArray), dim3(MPnum), dim3(THnum1D), 0, 0, (float*)v, s, N*4); return s; } void cudaParticleMD::adjustVelocities(real Temp, bool debug) { real v1 = sqrt(kB * Temp / m0); uint32_t __thnum = ::min((uint32_t)1024, threadsMax); if (debug) { std::cerr << std::endl << std::endl << "adjustVelocity currentTemp\t" << calcTemp() << "\tTarget Temp " << Temp << std::endl; hipLaunchKernelGGL(( adjustVelocity_F4), dim3(1), dim3(__thnum), sizeof(double)*__thnum*6, 0, v, __thnum, N, v1, reinterpret_cast<float*>(tmp3N)); hipDeviceSynchronize(); pthread_mutex_lock(&(mutTMP)); hipMemcpy(&(TMP[0]), tmp3N, sizeof(float)*6, hipMemcpyDeviceToHost); std::cerr << "velocity statistics"; for (int i=0;i<6;++i) std::cerr << "\t" << TMP[i]; std::cerr << std::endl; pthread_mutex_unlock(&(mutTMP)); std::cerr << "after adjusted T=" << calcTemp() << std::endl; } else { hipLaunchKernelGGL(( adjustVelocity_F4), dim3(1), dim3(__thnum), sizeof(double)*__thnum*6, 0, v, __thnum, N, v1); } } void cudaParticleMD::setLJparams(const std::vector<real> &p, uint32_t elemnum) { _setLJparams(p, elemnum); } void cudaParticleMD::initialAnnealing(uint32_t anealstep, real dt, real _rc, real _f0, real T) { std::cerr << "performs initial annealing by soft core potential to rmax= " << _rc << " with steps " << anealstep << std::endl; float3 c1, c2; c1.x = cell[0]; c1.y = cell[2]; c1.z = cell[4]; c2.x = cell[1]; c2.y = cell[3]; c2.z = cell[5]; #if defined(CUDACUTOFFBLOCK) dim3 __mpnum, __thnum; __mpnum.x = myBlockNum ; __mpnum.y = 27; __mpnum.z = 1; __thnum.x = THnum2D2; __thnum.y = 1; __thnum.z = 1; std::cerr << "Init Aneal with " << __mpnum.x << "x" << __mpnum.y << " blocks " << __thnum.x << "x" << __thnum.y << " threads" << std::endl; assert(myBlockNum<maxGrid); #else std::cerr << "Init Aneal with " << MPnum << 
" blocks " << THnum1D << " threads" << std::endl; #endif class potentialSC SC; SC.rc = _rc; SC.f0 = _f0; class MDpairForce<potentialSC> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID; P.op = SC; for (uint32_t i=0;i<anealstep;++i) { hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, N); if (withInfo) ErrorInfo("clear Forces"); SC.rc = _rc * (i+1)/anealstep; std::cerr << "\r" << SC.rc << "\t" << std::flush; #if defined(CUDACUTOFFBLOCK) calcBlockID(); hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, tmp81N, N*81); P.op.rc = SC.rc; hipLaunchKernelGGL(( calcF_IJpairWithBlock_F4<class MDpairForce<potentialSC> >) , dim3(__mpnum), dim3(__thnum), 0, 0, P, r, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N); hipLaunchKernelGGL(( reduce27_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, tmp81N, N); #else P.op.rc = SC.rc; hipLaunchKernelGGL(( calcF_IJpair<class MDpairForce<potentialSC> >), dim3(MPnum), dim3(THnum1D), 0, 0, P, r, F, N); #endif if (withInfo) ErrorInfo("calc Forces by softcore"); // correct force by velocity constTemp(); hipLaunchKernelGGL(( calcA_F4), dim3(MPnum), dim3(THnum1D), 0, 0, a, minv, F, N); if (withInfo) ErrorInfo("calcAcceleration"); hipLaunchKernelGGL(( propagateEuler_F4), dim3(MPnum), dim3(THnum1D), 0, 0, r, dt, v, a, move, N); if (withInfo) ErrorInfo("propagate by Euler"); hipLaunchKernelGGL(( applyPeriodicCondition_F4), dim3(MPnum), dim3(THnum1D), 0, 0, r, c1, c2, N); //adjustVelocities(MPnum, THnum1D, T); std::cerr << "T= " << calcTemp() << std::flush; } std::cerr << std::endl; std::cerr << "temperature scaling: " << scaleTemp(T) << std::endl; std::cerr << "with LJ " << std::endl; class potentialLJ LJ; hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, Fold, N); class MDpairForce<potentialLJ> P2; P2.cx = cell[6]; P2.cy = cell[7]; P2.cz = cell[8]; P2.c0x = sqrt(P2.cx) * 2.0; P2.c0y = sqrt(P2.cy) * 2.0; P2.c0z = sqrt(P2.cz) * 2.0; P2.rmax2 = rmax2; P2.typeID = typeID; P2.op = LJ; for (uint32_t i=0;i<anealstep;++i) { std::cerr << i << "\t" << calcTemp() << "\t"; hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, N); #if defined(CUDACUTOFFBLOCK) calcBlockID(); hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, tmp81N, N*81); hipLaunchKernelGGL(( calcF_IJpairWithBlock_F4<class MDpairForce<potentialLJ> >) , dim3(__mpnum), dim3(__thnum), 0, 0, P2, r, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N); hipLaunchKernelGGL(( reduce27_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, tmp81N, N); #else hipLaunchKernelGGL(( calcF_IJpair<class MDpairForce<potentialLJ> >), dim3(MPnum), dim3(THnum1D), 0, 0, P2, r, F, N); #endif std::cerr << constTemp() << std::endl; hipLaunchKernelGGL(( propagateVelocityVerlet_F4), dim3(MPnum), dim3(THnum2D*THnum2D/2), 0, 0, r, dt, v, F, Fold, minv, N); //std::cerr << scaleTemp(MPnum, THnum1D, T) << std::endl; hipLaunchKernelGGL(( applyPeriodicCondition_F4), dim3(MPnum), dim3(THnum1D), 0, 0, r, c1, c2, N); } std::cerr << "temperature scaling: " << scaleTemp(T) << std::endl; } void cudaParticleMD::import(const std::vector<ParticleBase> &P) { cudaParticleBase::import(P); double _m0 = std::accumulate(P.begin(), P.end(), 0.0, [](double acc, ParticleBase cur) { acc += cur.m; return acc; }); m0 = _m0 / P.size(); } void cudaParticleMD::makeNList(void) { if (!useNlist) { return; } real rmax2t = (sqrt(rmax2) + thickness) * (sqrt(rmax2) + thickness); // 
1) calc coordination number class calcCoordMD_F4 C; C.cx = cell[6]; C.cy = cell[7]; C.cz = cell[8]; C.rmax2 = rmax2t; calcBlockID(); hipLaunchKernelGGL(( clearArray), dim3(MPnum), dim3(THnum1D), 0, 0, tmp81N, N*81); dim3 __mpnum, __thnum; __mpnum.x = myBlockNum ; __mpnum.y = 27; __mpnum.z = 1; __thnum.x = THnum2D2; __thnum.y = 1; __thnum.z = 1; hipLaunchKernelGGL(( calcF_IJpairWithBlock_F4<class calcCoordMD_F4>) , dim3(__mpnum), dim3(__thnum), 0, 0, C, r, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N); hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, N); hipLaunchKernelGGL(( reduce27_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, tmp81N, N); hipLaunchKernelGGL(( real2ulong_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, &(NList.rowPtr[1]), N); // 2) make row pointer and column index uint32_t nnz = NList.makeRowPtr(); NList.Resize(nnz); class MDNeighborRegister C2; C2.cx = cell[6]; C2.cy = cell[7]; C2.cz = cell[8]; C2.rmax2 = rmax2t; dim3 _mpnum; uint32_t z = 0; _mpnum.x = myBlockNum; _mpnum.y = 1; _mpnum.z = 1; hipLaunchKernelGGL(( makeJlist_WithBlock_F4), dim3(_mpnum), dim3(THnum2D), 0, 0, C2, NList.rowPtr, NList.colIdx, r, z, blockNeighbor, pid, bindex, N, NULL); /** no need for MD neighbor list sortColIdx<<<MPnum, THnum1D>>>(NList.rowPtr, NList.colIdx, N); */ if (withInfo) ErrorInfo("cudaParticleMD::make Neighbor list"); }
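// calcMV2() and constTemp() above reduce on the GPU through DOT(hdl, n, x, incx, y, incy, &result),
// a wrapper that is defined elsewhere in this code base (not shown in this file). Assuming `real`
// is single precision, a minimal sketch of such a wrapper on top of hipBLAS could look like the
// following (illustrative only -- the real project may map it differently, e.g. to hipblasDdot
// when `real` is double):
#define DOT(handle, n, x, incx, y, incy, result) \
    hipblasSdot((handle), (n), (x), (incx), (y), (incy), (result))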
3df36534ffbf85061d62c368a75c54b29e1b4b97.cu
#include "cudaParticleMD.hh" #include "kernelfuncs.h" #include <assert.h> #include "kerneltemplate.hh" cudaParticleMD::~cudaParticleMD() { if (m!=NULL) cudaFree(m); if (hdl!=NULL) cublasDestroy(hdl); } void cudaParticleMD::setup(int n) { cudaParticleVV::setup(n); // alloc m 1/(1/mass) cudaMalloc((void **)&m, sizeof(real)*N); if (withInfo) ErrorInfo("malloc m[] on GPU"); // preparation for CUBLAS if (hdl==NULL) cublasCreate(&hdl); } void cudaParticleMD::calcForce(void) { clearArray_F4<<<MPnum, THnum1D>>>(F, N); class potentialLJ LJ; class MDpairForce<potentialLJ> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID_s; P.op = LJ; #if defined(CUDACUTOFFBLOCK) assert(myBlockNum<maxGrid); if (useNlist) { calcF_IJpairWithList_F4<<<MPnum, THnum1DX>>>(P, NList.rowPtr, NList.colIdx, r, F, N, NULL, 0, 0); } else { clearArray<<<MPnum, THnum1D>>>(tmp81N, N*81); dim3 _mpnum, _thnum; _mpnum.x = myBlockNum ; _mpnum.y = 27; _mpnum.z = 1; _thnum.x = THnum2D2; _thnum.y = 1; _thnum.z = 1; assert(_thnum.x*_thnum.y <= threadsMax); calcF_IJpairWithBlock_F4<class MDpairForce<potentialLJ> ><<<_mpnum, _thnum>>>(P, r_s, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N, NULL,NULL, true); reduce27_F4<<<MPnum, THnum1D>>>(F, tmp81N, N); } if (withInfo) ErrorInfo("cudaParticleMD::calcForce With Block"); #else calcF_IJpair<class MDpairForce<potentialLJ> ><<<MPnum, THnum1D>>>(P, r, F, N); //calcF_IJpair<class potentialLJ><<<_mpnum, _thnum>>>(LJ, r, F, typeID, N, cell[6], cell[7], cell[8], rmax2); //calcF_LJ<<<_mpnum, _thnum>>>(r, F, typeID, N, cx*cx, cy*cy, cz*cz); if (withInfo) ErrorInfo("cudaParticleMD::calcForce Without Block"); #endif } real cudaParticleMD::calcPotentialE(void) { clearArray_F4<<<MPnum, THnum1D>>>(tmp3N, N); class potentialLJ LJ; class MDpairPotential<potentialLJ> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID_s; P.op = LJ; real f = 0; #if defined(CUDACUTOFFBLOCK) assert(myBlockNum<maxGrid); if (useNlist) { calcF_IJpairWithList_F4<<<MPnum, THnum1DX>>>(P, NList.rowPtr, NList.colIdx, r, tmp3N, N, NULL, 0, 0); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE calc"); accumulate<<<1, threadsMax, sizeof(double)*threadsMax>>>(tmp3N, N, tmp3N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE accumulate"); cudaMemcpy(&f, tmp3N, sizeof(float), cudaMemcpyDeviceToHost); } else { clearArray<<<MPnum, THnum1D>>>(tmp81N, N*81); dim3 _mpnum, _thnum; _mpnum.x = myBlockNum ; _mpnum.y = 27; _mpnum.z = 1; _thnum.x = THnum2D2; _thnum.y = 1; _thnum.z = 1; assert(_thnum.x*_thnum.y <= threadsMax); calcF_IJpairWithBlock_F4<class MDpairPotential<potentialLJ> ><<<_mpnum, _thnum>>>(P, r_s, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N, NULL,NULL, true); reduce27_F4<<<MPnum, THnum1D>>>(tmp3N, tmp81N, N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE calc"); accumulate<<<1, threadsMax, sizeof(double)*threadsMax>>>(tmp3N, N, tmp3N); if (withInfo) ErrorInfo("cudaParticleMD::calcPotentialE accumulate"); cudaMemcpy(&f, tmp3N, sizeof(float), cudaMemcpyDeviceToHost); } #else std::cerr << "cudaParticleMD::calcPotentialE() is not implemented" << std::endl; #endif return f; } real cudaParticleMD::calcMV2(void) { calcV2_F4<<<MPnum, THnum1D>>>(v, reinterpret_cast<float*>(tmp3N), N); if (withInfo) ErrorInfo("cudaParticleMD::calcV2"); real t=0; DOT(hdl, N, 
reinterpret_cast<float*>(tmp3N), 1, m, 1, &t); if (withInfo) ErrorInfo("cudaParticleMD::DOT"); return t; } void cudaParticleMD::setM(void) { calcReciproc<<<MPnum, THnum1D>>>(minv, m, N); if (withInfo) ErrorInfo("calc reciprocal of minv to m"); } real cudaParticleMD::constTemp(void) { // calc kernel calculation first (before the CUBLAS funcs) real t = calcMV2(); //std::cerr << "constTemp T= " << t << std::endl; real lambda=0; DOT(hdl, N*4, (float*)F, 1, (float*)v, 1, &lambda); lambda /= t; // \sum F_i v_i / \sum m_i v_i^2 std::cerr << "[l=" << lambda << "]"; if (lambda != 0) correctConstTemp_F4<<<MPnum, THnum1D>>>(v, F, m, lambda, N); return t/(3*N*kB); } void cudaParticleMD::statMV2(std::ostream &o) { calcMV2(); pthread_mutex_lock(&(mutTMP)); cudaMemcpy(&(TMP[0]), tmp3N, sizeof(float)*N, cudaMemcpyDeviceToHost); o << std::endl << std::endl; for (int i=0;i<N;++i) o << i << "\t" << TMP[i] << std::endl; pthread_mutex_unlock(&(mutTMP)); } real cudaParticleMD::scaleTemp(real Temp) { const real T0 = calcTemp(); const real s = sqrt(static_cast<real>(3*N-1)/static_cast<real>(3*N)*Temp/T0); mulArray<<<MPnum, THnum1D>>>((float*)v, s, N*4); return s; } void cudaParticleMD::adjustVelocities(real Temp, bool debug) { real v1 = sqrt(kB * Temp / m0); uint32_t __thnum = std::min((uint32_t)1024, threadsMax); if (debug) { std::cerr << std::endl << std::endl << "adjustVelocity currentTemp\t" << calcTemp() << "\tTarget Temp " << Temp << std::endl; adjustVelocity_F4<<<1, __thnum, sizeof(double)*__thnum*6>>>(v, __thnum, N, v1, reinterpret_cast<float*>(tmp3N)); cudaDeviceSynchronize(); pthread_mutex_lock(&(mutTMP)); cudaMemcpy(&(TMP[0]), tmp3N, sizeof(float)*6, cudaMemcpyDeviceToHost); std::cerr << "velocity statistics"; for (int i=0;i<6;++i) std::cerr << "\t" << TMP[i]; std::cerr << std::endl; pthread_mutex_unlock(&(mutTMP)); std::cerr << "after adjusted T=" << calcTemp() << std::endl; } else { adjustVelocity_F4<<<1, __thnum, sizeof(double)*__thnum*6>>>(v, __thnum, N, v1); } } void cudaParticleMD::setLJparams(const std::vector<real> &p, uint32_t elemnum) { _setLJparams(p, elemnum); } void cudaParticleMD::initialAnnealing(uint32_t anealstep, real dt, real _rc, real _f0, real T) { std::cerr << "performs initial annealing by soft core potential to rmax= " << _rc << " with steps " << anealstep << std::endl; float3 c1, c2; c1.x = cell[0]; c1.y = cell[2]; c1.z = cell[4]; c2.x = cell[1]; c2.y = cell[3]; c2.z = cell[5]; #if defined(CUDACUTOFFBLOCK) dim3 __mpnum, __thnum; __mpnum.x = myBlockNum ; __mpnum.y = 27; __mpnum.z = 1; __thnum.x = THnum2D2; __thnum.y = 1; __thnum.z = 1; std::cerr << "Init Aneal with " << __mpnum.x << "x" << __mpnum.y << " blocks " << __thnum.x << "x" << __thnum.y << " threads" << std::endl; assert(myBlockNum<maxGrid); #else std::cerr << "Init Aneal with " << MPnum << " blocks " << THnum1D << " threads" << std::endl; #endif class potentialSC SC; SC.rc = _rc; SC.f0 = _f0; class MDpairForce<potentialSC> P; P.cx = cell[6]; P.cy = cell[7]; P.cz = cell[8]; P.c0x = sqrt(P.cx) * 2.0; P.c0y = sqrt(P.cy) * 2.0; P.c0z = sqrt(P.cz) * 2.0; P.rmax2 = rmax2; P.typeID = typeID; P.op = SC; for (uint32_t i=0;i<anealstep;++i) { clearArray_F4<<<MPnum, THnum1D>>>(F, N); if (withInfo) ErrorInfo("clear Forces"); SC.rc = _rc * (i+1)/anealstep; std::cerr << "\r" << SC.rc << "\t" << std::flush; #if defined(CUDACUTOFFBLOCK) calcBlockID(); clearArray<<<MPnum, THnum1D>>>(tmp81N, N*81); P.op.rc = SC.rc; calcF_IJpairWithBlock_F4<class MDpairForce<potentialSC> > <<<__mpnum, __thnum>>>(P, r, tmp81N, myBlockOffset, blockNeighbor, 
pid, bindex, N); reduce27_F4<<<MPnum, THnum1D>>>(F, tmp81N, N); #else P.op.rc = SC.rc; calcF_IJpair<class MDpairForce<potentialSC> ><<<MPnum, THnum1D>>>(P, r, F, N); #endif if (withInfo) ErrorInfo("calc Forces by softcore"); // correct force by velocity constTemp(); calcA_F4<<<MPnum, THnum1D>>>(a, minv, F, N); if (withInfo) ErrorInfo("calcAcceleration"); propagateEuler_F4<<<MPnum, THnum1D>>>(r, dt, v, a, move, N); if (withInfo) ErrorInfo("propagate by Euler"); applyPeriodicCondition_F4<<<MPnum, THnum1D>>>(r, c1, c2, N); //adjustVelocities(MPnum, THnum1D, T); std::cerr << "T= " << calcTemp() << std::flush; } std::cerr << std::endl; std::cerr << "temperature scaling: " << scaleTemp(T) << std::endl; std::cerr << "with LJ " << std::endl; class potentialLJ LJ; clearArray_F4<<<MPnum, THnum1D>>>(Fold, N); class MDpairForce<potentialLJ> P2; P2.cx = cell[6]; P2.cy = cell[7]; P2.cz = cell[8]; P2.c0x = sqrt(P2.cx) * 2.0; P2.c0y = sqrt(P2.cy) * 2.0; P2.c0z = sqrt(P2.cz) * 2.0; P2.rmax2 = rmax2; P2.typeID = typeID; P2.op = LJ; for (uint32_t i=0;i<anealstep;++i) { std::cerr << i << "\t" << calcTemp() << "\t"; clearArray_F4<<<MPnum, THnum1D>>>(F, N); #if defined(CUDACUTOFFBLOCK) calcBlockID(); clearArray<<<MPnum, THnum1D>>>(tmp81N, N*81); calcF_IJpairWithBlock_F4<class MDpairForce<potentialLJ> > <<<__mpnum, __thnum>>>(P2, r, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N); reduce27_F4<<<MPnum, THnum1D>>>(F, tmp81N, N); #else calcF_IJpair<class MDpairForce<potentialLJ> ><<<MPnum, THnum1D>>>(P2, r, F, N); #endif std::cerr << constTemp() << std::endl; propagateVelocityVerlet_F4<<<MPnum, THnum2D*THnum2D/2>>>(r, dt, v, F, Fold, minv, N); //std::cerr << scaleTemp(MPnum, THnum1D, T) << std::endl; applyPeriodicCondition_F4<<<MPnum, THnum1D>>>(r, c1, c2, N); } std::cerr << "temperature scaling: " << scaleTemp(T) << std::endl; } void cudaParticleMD::import(const std::vector<ParticleBase> &P) { cudaParticleBase::import(P); double _m0 = std::accumulate(P.begin(), P.end(), 0.0, [](double acc, ParticleBase cur) { acc += cur.m; return acc; }); m0 = _m0 / P.size(); } void cudaParticleMD::makeNList(void) { if (!useNlist) { return; } real rmax2t = (sqrt(rmax2) + thickness) * (sqrt(rmax2) + thickness); // 1) calc coordination number class calcCoordMD_F4 C; C.cx = cell[6]; C.cy = cell[7]; C.cz = cell[8]; C.rmax2 = rmax2t; calcBlockID(); clearArray<<<MPnum, THnum1D>>>(tmp81N, N*81); dim3 __mpnum, __thnum; __mpnum.x = myBlockNum ; __mpnum.y = 27; __mpnum.z = 1; __thnum.x = THnum2D2; __thnum.y = 1; __thnum.z = 1; calcF_IJpairWithBlock_F4<class calcCoordMD_F4> <<<__mpnum, __thnum>>>(C, r, tmp81N, myBlockOffset, blockNeighbor, pid, bindex, N); clearArray_F4<<<MPnum, THnum1D>>>(F, N); reduce27_F4<<<MPnum, THnum1D>>>(F, tmp81N, N); real2ulong_F4<<<MPnum, THnum1D>>>(F, &(NList.rowPtr[1]), N); // 2) make row pointer and column index uint32_t nnz = NList.makeRowPtr(); NList.Resize(nnz); class MDNeighborRegister C2; C2.cx = cell[6]; C2.cy = cell[7]; C2.cz = cell[8]; C2.rmax2 = rmax2t; dim3 _mpnum; uint32_t z = 0; _mpnum.x = myBlockNum; _mpnum.y = 1; _mpnum.z = 1; makeJlist_WithBlock_F4<<<_mpnum, THnum2D>>>(C2, NList.rowPtr, NList.colIdx, r, z, blockNeighbor, pid, bindex, N, NULL); /** no need for MD neighbor list sortColIdx<<<MPnum, THnum1D>>>(NList.rowPtr, NList.colIdx, N); */ if (withInfo) ErrorInfo("cudaParticleMD::make Neighbor list"); }
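// Comparing the .hip and .cu versions of cudaParticleMD above shows the mechanical rewrite that
// hipify applies to every kernel launch: the CUDA triple-chevron form becomes an explicit
// hipLaunchKernelGGL call with the grid, block, shared-memory and stream arguments spelled out.
// Illustrative example taken from one launch in this pair:
//
//   CUDA:  clearArray_F4<<<MPnum, THnum1D>>>(F, N);
//   HIP:   hipLaunchKernelGGL(( clearArray_F4), dim3(MPnum), dim3(THnum1D), 0, 0, F, N);
//
// Runtime API calls are renamed one-for-one (cudaMalloc -> hipMalloc, cudaMemcpy -> hipMemcpy,
// cudaDeviceSynchronize -> hipDeviceSynchronize), and cuBLAS maps to hipBLAS
// (cublasCreate -> hipblasCreate, cublasDestroy -> hipblasDestroy).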
acd7272fa8371c3d8f7126c210ce02b133e441f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <caffe2/core/context_gpu.h>
#include <caffe2/operator/squared_l2_op.h>

namespace caffe2 {

namespace {

// Per-row squared L2 norm: Y[i] = sum over j of X[i*D + j]^2
template <typename T>
__global__ void SquaredL2Kernel(const int N, const int D, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    float sum = 0;
    for (int j = i * D, e = j + D; j != e; j++) {
      float x = X[j];
      sum += x * x;
    }
    Y[i] = sum;
  }
}

} // namespace

template <>
bool SquaredL2Op<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0);
  int N = X.ndim() > 0 ? X.dim32(0) : 1;
  int D = N > 0 ? X.size() / N : 0;
  Y->Resize(vector<TIndex>(size_t(1), N));
  hipLaunchKernelGGL(( SquaredL2Kernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
      N, D, X.data<float>(), Y->mutable_data<float>());
  return true;
}

namespace {

// Gradient pass: dX[i*D + j] = X[i*D + j] * dY[i]
template <typename T>
__global__ void SquaredL2KernelKernel(const int N, const int D, const T* X, const T* dY, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N * D) {
    dX[i] = X[i] * dY[i / D];
  }
}

} // namespace

template <>
bool SquaredL2GradientOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto& dY = Input(1);
  auto* dX = Output(0);
  int N = X.ndim() > 0 ? X.dim32(0) : 1;
  int D = N > 0 ? X.size() / N : 0;
  CAFFE_ENFORCE_EQ(dY.ndim(), 1);
  CAFFE_ENFORCE_EQ(dY.dim32(0), N);
  dX->ResizeLike(X);
  hipLaunchKernelGGL(( SquaredL2KernelKernel<float>), dim3(CAFFE_GET_BLOCKS(N * D)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(),
      N, D, X.data<float>(), dY.data<float>(), dX->mutable_data<float>());
  return true;
}

REGISTER_CUDA_OPERATOR(SquaredL2, SquaredL2Op<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2Gradient, SquaredL2GradientOp<float, CUDAContext>);

} // namespace caffe2
acd7272fa8371c3d8f7126c210ce02b133e441f9.cu
#include <caffe2/core/context_gpu.h>
#include <caffe2/operator/squared_l2_op.h>

namespace caffe2 {

namespace {

// Per-row squared L2 norm: Y[i] = sum over j of X[i*D + j]^2
template <typename T>
__global__ void SquaredL2Kernel(const int N, const int D, const T* X, T* Y) {
  CUDA_1D_KERNEL_LOOP(i, N) {
    float sum = 0;
    for (int j = i * D, e = j + D; j != e; j++) {
      float x = X[j];
      sum += x * x;
    }
    Y[i] = sum;
  }
}

} // namespace

template <>
bool SquaredL2Op<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto* Y = Output(0);
  int N = X.ndim() > 0 ? X.dim32(0) : 1;
  int D = N > 0 ? X.size() / N : 0;
  Y->Resize(vector<TIndex>(size_t(1), N));
  SquaredL2Kernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
      N, D, X.data<float>(), Y->mutable_data<float>());
  return true;
}

namespace {

// Gradient pass: dX[i*D + j] = X[i*D + j] * dY[i]
template <typename T>
__global__ void SquaredL2KernelKernel(const int N, const int D, const T* X, const T* dY, T* dX) {
  CUDA_1D_KERNEL_LOOP(i, N * D) {
    dX[i] = X[i] * dY[i / D];
  }
}

} // namespace

template <>
bool SquaredL2GradientOp<float, CUDAContext>::RunOnDevice() {
  auto& X = Input(0);
  auto& dY = Input(1);
  auto* dX = Output(0);
  int N = X.ndim() > 0 ? X.dim32(0) : 1;
  int D = N > 0 ? X.size() / N : 0;
  CAFFE_ENFORCE_EQ(dY.ndim(), 1);
  CAFFE_ENFORCE_EQ(dY.dim32(0), N);
  dX->ResizeLike(X);
  SquaredL2KernelKernel<float>
      <<<CAFFE_GET_BLOCKS(N * D), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>(
          N, D, X.data<float>(), dY.data<float>(), dX->mutable_data<float>());
  return true;
}

REGISTER_CUDA_OPERATOR(SquaredL2, SquaredL2Op<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(SquaredL2Gradient, SquaredL2GradientOp<float, CUDAContext>);

} // namespace caffe2
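// Both kernels above rely on Caffe2's CUDA_1D_KERNEL_LOOP helper for a grid-stride loop. In
// Caffe2 that macro is defined essentially as the sketch below (reproduced from memory; consult
// caffe2/core/common_gpu.h for the authoritative definition):
#define CUDA_1D_KERNEL_LOOP(i, n)                                  \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);     \
       i += blockDim.x * gridDim.x)
// Each thread walks the index space with a stride of blockDim.x * gridDim.x, which is what lets
// CAFFE_GET_BLOCKS cap the grid size independently of N.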
a0ef4ce35677bd8859fb278192404a711677040b.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <algorithm> #include <hip/hip_runtime.h> struct Task { uint id; float time; Task(uint id, float time) { this->id = id; this->time = time; } Task() { this->id = 0; this->time = 0; } bool operator() (Task i,Task j) { return (i.time < j.time); } }; struct Machine { int id; float cost; Machine() { this->id = 0; this->cost = 0; } bool operator() (Machine i,Machine j) { return (i.cost < j.cost); } }; void min_min(Task* tasks, float* completion_times, int* task_map, bool* task_scheduled, Machine* machines, int t, int m, int max_time) { uint count = 0; uint q = 0; while(count < t) { float current_time = 0; int j = machines[q].id; int i = 0; while(count < t && i < t) { int task_id = tasks[j * t + i].id; if (!task_scheduled[task_id]) { current_time = completion_times[j] + tasks[j * t + i].time; if(current_time > max_time){ i++; continue; } task_scheduled[task_id] = true; task_map[task_id] = j; completion_times[j] = current_time; count++; } i++; } q++; if(q == m && count != t) { printf("### ERROR ###\n"); break; } } } void machine_sorting(Machine* machines, int m) { std::stable_sort (&machines[0], &machines[0]+m, Machine()); } void segmented_sorting(Task* tasks, int m, int t) { for(int i = 0; i < m; i++) { int j = i*t; std::stable_sort (&tasks[j], &tasks[j]+t, Task()); } } template<typename T> void print(T* vec, uint t, uint m) { std::cout << "\n"; for (uint i = 0; i < t; i++) { for (uint j = 0; j < m; j++) { std::cout << vec[i * m + j] << " "; } std::cout << "\n"; } } template<typename T> void print(T* vec, uint t) { std::cout << "\n"; for (uint i = 0; i < t; i++) { std::cout << vec[i] << " "; } std::cout << "\n"; } void print(Task* vec, uint t, uint m) { std::cout << "\n"; for (uint j = 0; j < m; j++) { for (uint i = 0; i < t; i++) { std::cout << "id=" << vec[j * t + i].id << " time=" << vec[j * t + i].time << "\t"; } std::cout << "\n"; } } void print(Machine* vec, uint m) { std::cout << "\n"; for (uint j = 0; j < m; j++) { std::cout << "id=" << vec[j].id << " time=" << vec[j].cost << "\t"; } std::cout << "\n"; } void print(float* completion_times, Machine* vec, uint m) { float sum = 0; for (uint j = 0; j < m; j++) { uint id = vec[j].id; float cost = vec[j].cost * completion_times[id]; std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n"; sum += cost; } std::cout << "Custo Total: " << sum << "\n"; } int main(int argc, char **argv) { int t, m; float max_time, aux; aux = scanf("%d", &t); aux = scanf("%d", &m); aux = scanf("%f", &max_time); //std::cout << "t=" << t << " m=" << m << "\n"; Task *tasks = (Task *) malloc(sizeof(Task) * (t * m)); bool *task_scheduled = (bool *) malloc(sizeof(bool) * t); int *task_map = (int *) malloc(sizeof(int) * (t)); float *completion_times = (float *) malloc(sizeof(float) * (m)); Machine *machines = (Machine *) malloc(sizeof(Machine) * (m)); // Read matrix task machine for (int i = 0; i < t; i++) { for (int j = 0; j < m; j++) { int a = scanf("%f", &aux); tasks[j * t + i].id = i; tasks[j * t + i].time = aux; completion_times[j] = 0; } 
task_map[i] = -1; task_scheduled[i] = false; } //print(tasks, t, m); // Reading vector of costs for each machine for (int j = 0; j < m; j++) { int a = scanf("%f", &aux); machines[j].id = j; machines[j].cost = aux; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); machine_sorting(machines, m); //print(machines, m); //segmented_sorting(tasks, m, t); //print(tasks,t,m); min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time); hipEventRecord(stop); if (ELAPSED_TIME == 1) { hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { //print(tasks, t, m); //print(completion_times, m); print(completion_times, machines, m); //print(task_scheduled, t); //print(task_map, t, m); } free(task_scheduled); free(task_map); free(tasks); free(completion_times); return 0; }
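// The timing branch in main() above tests ELAPSED_TIME, but the file never defines it, so it is
// evidently expected as a compile-time definition (e.g. -DELAPSED_TIME=1 on the hipcc or nvcc
// command line). A defensive default, shown only as a sketch, would avoid a compile error when
// the flag is omitted:
#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0   // 0: print the schedule cost, 1: print only the elapsed milliseconds
#endif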
a0ef4ce35677bd8859fb278192404a711677040b.cu
/** * Copyright 1993-2012 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. */ #include <stdio.h> #include <stdlib.h> #include <iostream> #include <algorithm> #include <cuda.h> struct Task { uint id; float time; Task(uint id, float time) { this->id = id; this->time = time; } Task() { this->id = 0; this->time = 0; } bool operator() (Task i,Task j) { return (i.time < j.time); } }; struct Machine { int id; float cost; Machine() { this->id = 0; this->cost = 0; } bool operator() (Machine i,Machine j) { return (i.cost < j.cost); } }; void min_min(Task* tasks, float* completion_times, int* task_map, bool* task_scheduled, Machine* machines, int t, int m, int max_time) { uint count = 0; uint q = 0; while(count < t) { float current_time = 0; int j = machines[q].id; int i = 0; while(count < t && i < t) { int task_id = tasks[j * t + i].id; if (!task_scheduled[task_id]) { current_time = completion_times[j] + tasks[j * t + i].time; if(current_time > max_time){ i++; continue; } task_scheduled[task_id] = true; task_map[task_id] = j; completion_times[j] = current_time; count++; } i++; } q++; if(q == m && count != t) { printf("### ERROR ###\n"); break; } } } void machine_sorting(Machine* machines, int m) { std::stable_sort (&machines[0], &machines[0]+m, Machine()); } void segmented_sorting(Task* tasks, int m, int t) { for(int i = 0; i < m; i++) { int j = i*t; std::stable_sort (&tasks[j], &tasks[j]+t, Task()); } } template<typename T> void print(T* vec, uint t, uint m) { std::cout << "\n"; for (uint i = 0; i < t; i++) { for (uint j = 0; j < m; j++) { std::cout << vec[i * m + j] << " "; } std::cout << "\n"; } } template<typename T> void print(T* vec, uint t) { std::cout << "\n"; for (uint i = 0; i < t; i++) { std::cout << vec[i] << " "; } std::cout << "\n"; } void print(Task* vec, uint t, uint m) { std::cout << "\n"; for (uint j = 0; j < m; j++) { for (uint i = 0; i < t; i++) { std::cout << "id=" << vec[j * t + i].id << " time=" << vec[j * t + i].time << "\t"; } std::cout << "\n"; } } void print(Machine* vec, uint m) { std::cout << "\n"; for (uint j = 0; j < m; j++) { std::cout << "id=" << vec[j].id << " time=" << vec[j].cost << "\t"; } std::cout << "\n"; } void print(float* completion_times, Machine* vec, uint m) { float sum = 0; for (uint j = 0; j < m; j++) { uint id = vec[j].id; float cost = vec[j].cost * completion_times[id]; std::cout << vec[j].cost << " * " << completion_times[id] << " = " << cost << "\n"; sum += cost; } std::cout << "Custo Total: " << sum << "\n"; } int main(int argc, char **argv) { int t, m; float max_time, aux; aux = scanf("%d", &t); aux = scanf("%d", &m); aux = scanf("%f", &max_time); //std::cout << "t=" << t << " m=" << m << "\n"; Task *tasks = (Task *) malloc(sizeof(Task) * (t * m)); bool *task_scheduled = (bool *) malloc(sizeof(bool) * t); int *task_map = (int *) malloc(sizeof(int) * (t)); float *completion_times = (float *) malloc(sizeof(float) * (m)); Machine *machines = (Machine *) malloc(sizeof(Machine) * (m)); // Read matrix task machine for (int i = 0; i < t; i++) { for (int j = 0; j < m; j++) { int a = scanf("%f", &aux); tasks[j * t + i].id = i; tasks[j * t + i].time = aux; completion_times[j] = 0; } task_map[i] = -1; task_scheduled[i] = false; } //print(tasks, t, m); 
// Reading vector of costs for each machine for (int j = 0; j < m; j++) { int a = scanf("%f", &aux); machines[j].id = j; machines[j].cost = aux; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); machine_sorting(machines, m); //print(machines, m); //segmented_sorting(tasks, m, t); //print(tasks,t,m); min_min(tasks, completion_times, task_map, task_scheduled, machines, t, m, max_time); cudaEventRecord(stop); if (ELAPSED_TIME == 1) { cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else { //print(tasks, t, m); //print(completion_times, m); print(completion_times, machines, m); //print(task_scheduled, t); //print(task_map, t, m); } free(task_scheduled); free(task_map); free(tasks); free(completion_times); return 0; }
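// main() reads its problem instance from stdin: first t (tasks), m (machines) and max_time, then
// the t x m execution-time matrix one task per row, then one cost factor per machine. A tiny
// illustrative instance follows (the annotations after '#' are explanatory and not part of the
// input):
//
//   3 2 100.0        # t = 3 tasks, m = 2 machines, max_time = 100.0
//   10.0 12.0        # task 0: time on machine 0, time on machine 1
//   20.0 18.0        # task 1
//    5.0  9.0        # task 2
//   1.5 2.0          # cost factor of machine 0 and machine 1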
ed3fc4070385635fa77048bb1c437b2a9db4ac71.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <time.h> #include <hip/hip_runtime.h> //Code written by Alan Fleming //CONSTANTS #define MATRIXSIZE 2048 #define BLOCKSIZE 1024 void cpuHistogram(int* input, int* histogram, int size) { for(int i = 0; i < size; i++) { histogram[input[i]]++; } } __global__ void histogram(int* input, int* histogram, int size) { //get starting index for thread int i = threadIdx.x + blockIdx.x * blockDim.x; //calculate stride int stride = blockDim.x * gridDim.x; //preform histogram calculation while( i < size) { atomicAdd( &(histogram[input[i]]), 1); i += stride; } } //currently does not work for block sizes smaller than 256 __global__ void sharedHistogram(int* input, int* histogram, int size) { //initialize shared memory for the block __shared__ int privateHistogram[256]; if(threadIdx.x < 256) privateHistogram[threadIdx.x] = 0; __syncthreads(); //get starting index for thread int i = threadIdx.x + blockIdx.x * blockDim.x; //calculate stride int stride = blockDim.x * gridDim.x; //preform histogram calculation while( i < size) { atomicAdd( &(privateHistogram[input[i]]), 1); i += stride; } //ensure all threads have finished their additions __syncthreads(); //add private histogram to public histogram if(threadIdx.x < 256) { atomicAdd( &(histogram[threadIdx.x]), privateHistogram[threadIdx.x]); } } int main() { int *input = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array int *cpuResult = (int *)malloc(sizeof(int) * 256); //allocate space for cpu output array int *basicGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using global memory int *sharedGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using shared memory //intialize the input array int init = 1325; for(int i=0; i < MATRIXSIZE; i++){ init= 3125 * init % 65537; input[i]= init % 256; } //clear the output arrays to ensure proper adding for(int i = 0; i < 256; i++) { cpuResult[i] = 0; basicGPUResult[i] = 0; sharedGPUResult[i] = 0; } //Test CPU //Get start time clock_t t1 = clock(); //Calculate reduction cpuHistogram(input, cpuResult, MATRIXSIZE); //Get stop time clock_t t2 = clock(); //Calculate runtime float cpuTime= (float(t2-t1)/CLOCKS_PER_SEC*1000); //Allocate memory on GPU compution. 
dev_input holds the input array; dev_basicGPUResult and dev_sharedGPUResult hold the two 256-bin histogram results int *dev_input, *dev_basicGPUResult, *dev_sharedGPUResult; hipMalloc((void **)(&dev_input), MATRIXSIZE *sizeof(int)); hipMalloc((void **)(&dev_basicGPUResult), 256 *sizeof(int)); hipMalloc((void **)(&dev_sharedGPUResult), 256 *sizeof(int)); //copy memory to gpu hipMemcpy(dev_input, input, MATRIXSIZE * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_basicGPUResult, basicGPUResult, 256 * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dev_sharedGPUResult, sharedGPUResult, 256 * sizeof(int), hipMemcpyHostToDevice); //calculate dimensions for gpu dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(ceil(double(MATRIXSIZE)/dimBlock.x)); //~~WITHOUT SHARED MEMORY~~ //Set up cuda events for recording runtime hipEvent_t start,stop; float basicGPUTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //calculate histogram without shared memory hipLaunchKernelGGL(( histogram), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_input, dev_basicGPUResult, MATRIXSIZE); //calculate runtime hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&basicGPUTime,start,stop); //destroy cuda events hipEventDestroy(start); hipEventDestroy(stop); //copy histogram from gpu hipMemcpy(basicGPUResult, dev_basicGPUResult, 256 * sizeof(int), hipMemcpyDeviceToHost); //print speedup printf("--WITHOUT SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)basicGPUTime, double(cpuTime / basicGPUTime)); //verify results bool valid = true; for(int i = 0; i < 256; i++) { if(cpuResult[i] != basicGPUResult[i]) { valid = false; break; } } if(valid) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //~~WITH SHARED MEMORY~~ //Set up cuda events for recording runtime float sharedGPUTime; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); //calculate histogram with shared memory hipLaunchKernelGGL(( sharedHistogram), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_input, dev_sharedGPUResult, MATRIXSIZE); //calculate runtime hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&sharedGPUTime,start,stop); //destroy cuda events hipEventDestroy(start); hipEventDestroy(stop); //copy histogram from gpu hipMemcpy(sharedGPUResult, dev_sharedGPUResult, 256 * sizeof(int), hipMemcpyDeviceToHost); //print speedup printf("--WITH SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)sharedGPUTime, double(cpuTime / sharedGPUTime)); //verify results valid = true; for(int i = 0; i < 256; i++) { if(cpuResult[i] != sharedGPUResult[i]) { valid = false; break; } } if(valid) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //free up memory before returning free(input); free(cpuResult); free(basicGPUResult); free(sharedGPUResult); hipFree(dev_input); hipFree(dev_basicGPUResult); hipFree(dev_sharedGPUResult); return 0; }
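The .hip file above is the hipify translation of the .cu file that follows; apart from the header rename, the visible mechanical change is the kernel-launch syntax. The fragment below is a minimal, self-contained illustration of that correspondence; the scale kernel and launch_both_ways helper are placeholders invented for this sketch, not code from either file.

#include <hip/hip_runtime.h>

__global__ void scale(float* data, float factor, int n) {
    // Simple elementwise kernel used only to demonstrate the two launch forms.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

void launch_both_ways(float* d_data, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:
    //     scale<<<grid, block, 0, 0>>>(d_data, 2.0f, n);
    // HIP form emitted by hipify: the shared-memory byte count and stream become
    // explicit arguments, followed by the kernel arguments.
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_data, 2.0f, n);
}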
ed3fc4070385635fa77048bb1c437b2a9db4ac71.cu
#include <stdio.h> #include <math.h> #include <time.h> #include <cuda.h> //Code written by Alan Fleming //CONSTANTS #define MATRIXSIZE 2048 #define BLOCKSIZE 1024 void cpuHistogram(int* input, int* histogram, int size) { for(int i = 0; i < size; i++) { histogram[input[i]]++; } } __global__ void histogram(int* input, int* histogram, int size) { //get starting index for thread int i = threadIdx.x + blockIdx.x * blockDim.x; //calculate stride int stride = blockDim.x * gridDim.x; //preform histogram calculation while( i < size) { atomicAdd( &(histogram[input[i]]), 1); i += stride; } } //currently does not work for block sizes smaller than 256 __global__ void sharedHistogram(int* input, int* histogram, int size) { //initialize shared memory for the block __shared__ int privateHistogram[256]; if(threadIdx.x < 256) privateHistogram[threadIdx.x] = 0; __syncthreads(); //get starting index for thread int i = threadIdx.x + blockIdx.x * blockDim.x; //calculate stride int stride = blockDim.x * gridDim.x; //preform histogram calculation while( i < size) { atomicAdd( &(privateHistogram[input[i]]), 1); i += stride; } //ensure all threads have finished their additions __syncthreads(); //add private histogram to public histogram if(threadIdx.x < 256) { atomicAdd( &(histogram[threadIdx.x]), privateHistogram[threadIdx.x]); } } int main() { int *input = (int *)malloc(sizeof(int) * MATRIXSIZE); //allocate space for array int *cpuResult = (int *)malloc(sizeof(int) * 256); //allocate space for cpu output array int *basicGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using global memory int *sharedGPUResult = (int *)malloc(sizeof(int) * 256); //allocate space for gpu output array using shared memory //intialize the input array int init = 1325; for(int i=0; i < MATRIXSIZE; i++){ init= 3125 * init % 65537; input[i]= init % 256; } //clear the output arrays to ensure proper adding for(int i = 0; i < 256; i++) { cpuResult[i] = 0; basicGPUResult[i] = 0; sharedGPUResult[i] = 0; } //Test CPU //Get start time clock_t t1 = clock(); //Calculate reduction cpuHistogram(input, cpuResult, MATRIXSIZE); //Get stop time clock_t t2 = clock(); //Calculate runtime float cpuTime= (float(t2-t1)/CLOCKS_PER_SEC*1000); //Allocate memory on GPU compution. 
dev_input holds the input array; dev_basicGPUResult and dev_sharedGPUResult hold the two 256-bin histogram results int *dev_input, *dev_basicGPUResult, *dev_sharedGPUResult; cudaMalloc((void **)(&dev_input), MATRIXSIZE *sizeof(int)); cudaMalloc((void **)(&dev_basicGPUResult), 256 *sizeof(int)); cudaMalloc((void **)(&dev_sharedGPUResult), 256 *sizeof(int)); //copy memory to gpu cudaMemcpy(dev_input, input, MATRIXSIZE * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_basicGPUResult, basicGPUResult, 256 * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_sharedGPUResult, sharedGPUResult, 256 * sizeof(int), cudaMemcpyHostToDevice); //calculate dimensions for gpu dim3 dimBlock(BLOCKSIZE); dim3 dimGrid(ceil(double(MATRIXSIZE)/dimBlock.x)); //~~WITHOUT SHARED MEMORY~~ //Set up cuda events for recording runtime cudaEvent_t start,stop; float basicGPUTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //calculate histogram without shared memory histogram<<<dimGrid, dimBlock>>>(dev_input, dev_basicGPUResult, MATRIXSIZE); //calculate runtime cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&basicGPUTime,start,stop); //destroy cuda events cudaEventDestroy(start); cudaEventDestroy(stop); //copy histogram from gpu cudaMemcpy(basicGPUResult, dev_basicGPUResult, 256 * sizeof(int), cudaMemcpyDeviceToHost); //print speedup printf("--WITHOUT SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)basicGPUTime, double(cpuTime / basicGPUTime)); //verify results bool valid = true; for(int i = 0; i < 256; i++) { if(cpuResult[i] != basicGPUResult[i]) { valid = false; break; } } if(valid) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //~~WITH SHARED MEMORY~~ //Set up cuda events for recording runtime float sharedGPUTime; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); //calculate histogram with shared memory sharedHistogram<<<dimGrid, dimBlock>>>(dev_input, dev_sharedGPUResult, MATRIXSIZE); //calculate runtime cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&sharedGPUTime,start,stop); //destroy cuda events cudaEventDestroy(start); cudaEventDestroy(stop); //copy histogram from gpu cudaMemcpy(sharedGPUResult, dev_sharedGPUResult, 256 * sizeof(int), cudaMemcpyDeviceToHost); //print speedup printf("--WITH SHARED MEMORY--\nCPU Runtime: %f\nGpu Runtime: %f\nSpeedup: %f\n", (double)cpuTime, (double)sharedGPUTime, double(cpuTime / sharedGPUTime)); //verify results valid = true; for(int i = 0; i < 256; i++) { if(cpuResult[i] != sharedGPUResult[i]) { valid = false; break; } } if(valid) { printf("TEST PASSED\n"); } else { printf("TEST FAILED\n"); } //free up memory before returning free(input); free(cpuResult); free(basicGPUResult); free(sharedGPUResult); cudaFree(dev_input); cudaFree(dev_basicGPUResult); cudaFree(dev_sharedGPUResult); return 0; }
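The sharedHistogram kernel in the pair above notes that it does not work for blocks smaller than 256 threads, because exactly one thread per bin clears and flushes the private copy. A common way to lift that restriction is to stride over the bins as well. The sketch below shows that variant under the same 256-bin assumption; the kernel name histogramAnyBlockSize is invented for this example and is not a drop-in replacement taken from the files.

#include <cuda_runtime.h>

#define NUM_BINS 256

__global__ void histogramAnyBlockSize(const int* input, int* histogram, int size) {
    __shared__ int privateHist[NUM_BINS];
    // Stride over the bins so blocks smaller than NUM_BINS still clear them all.
    for (int b = threadIdx.x; b < NUM_BINS; b += blockDim.x) privateHist[b] = 0;
    __syncthreads();

    // Grid-stride loop over the input, accumulating into the block-private bins.
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size;
         i += blockDim.x * gridDim.x) {
        atomicAdd(&privateHist[input[i]], 1);
    }
    __syncthreads();

    // Flush the private bins to the global histogram, again with a stride loop.
    for (int b = threadIdx.x; b < NUM_BINS; b += blockDim.x) {
        atomicAdd(&histogram[b], privateHist[b]);
    }
}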
00ba7dd061e09c3e8fd3940ece094659d97d8f3e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "gtest/gtest.h" #include "helper_math.cuh" #include "strel.cuh" // For convenience using namespace gpho; using namespace gpho::detail; TEST(FlatBallApproxTest, NonPositiveRadius) { { SCOPED_TRACE("Radius = -1"); auto lines = flatBallApprox(-1); for (const auto& line : lines) { ASSERT_EQ(line.length, 0); } } { SCOPED_TRACE("Radius = 0"); auto lines = flatBallApprox(0); for (const auto& line : lines) { ASSERT_EQ(line.length, 0); } } } TEST(FlatBallApproxTest, ValidRadius) { auto lines = flatBallApprox(7); int expectedLengths[13] = { 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3 }; // This array is copied directly from flatBallApprox, so having it here is mostly to check for typos int3 expectedSteps[13] = { { 1, 0, 0 }, { 0, -1, 0 }, { 0, 0, 1 }, { 1, 1, 0 }, {-1, 1, 0 }, {-1, 0, -1 }, { 1, 0, -1 }, { 0, 1, 1 }, { 0, -1, 1 }, {-1, -1, -1 }, { 1, 1, -1 }, { 1, -1, 1 }, {-1, 1, 1 } }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); ASSERT_EQ(lines[i].step, expectedSteps[i]); } } TEST(FlatBallApproxTest, ApproximationTypes) { { SCOPED_TRACE("Inside"); auto lines = flatBallApprox(23, APPROX_INSIDE); int expectedLengths[13] = { 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } { SCOPED_TRACE("Best"); auto lines = flatBallApprox(23, APPROX_BEST); int expectedLengths[13] = { 9, 9, 9, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } { SCOPED_TRACE("Outside"); auto lines = flatBallApprox(23, APPROX_OUTSIDE); int expectedLengths[13] = { 11, 11, 11, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } }
00ba7dd061e09c3e8fd3940ece094659d97d8f3e.cu
#include <cuda_runtime.h> #include "gtest/gtest.h" #include "helper_math.cuh" #include "strel.cuh" // For convenience using namespace gpho; using namespace gpho::detail; TEST(FlatBallApproxTest, NonPositiveRadius) { { SCOPED_TRACE("Radius = -1"); auto lines = flatBallApprox(-1); for (const auto& line : lines) { ASSERT_EQ(line.length, 0); } } { SCOPED_TRACE("Radius = 0"); auto lines = flatBallApprox(0); for (const auto& line : lines) { ASSERT_EQ(line.length, 0); } } } TEST(FlatBallApproxTest, ValidRadius) { auto lines = flatBallApprox(7); int expectedLengths[13] = { 3, 3, 3, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3 }; // This array is copied directly from flatBallApprox, so having it here is mostly to check for typos int3 expectedSteps[13] = { { 1, 0, 0 }, { 0, -1, 0 }, { 0, 0, 1 }, { 1, 1, 0 }, {-1, 1, 0 }, {-1, 0, -1 }, { 1, 0, -1 }, { 0, 1, 1 }, { 0, -1, 1 }, {-1, -1, -1 }, { 1, 1, -1 }, { 1, -1, 1 }, {-1, 1, 1 } }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); ASSERT_EQ(lines[i].step, expectedSteps[i]); } } TEST(FlatBallApproxTest, ApproximationTypes) { { SCOPED_TRACE("Inside"); auto lines = flatBallApprox(23, APPROX_INSIDE); int expectedLengths[13] = { 7, 7, 7, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } { SCOPED_TRACE("Best"); auto lines = flatBallApprox(23, APPROX_BEST); int expectedLengths[13] = { 9, 9, 9, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } { SCOPED_TRACE("Outside"); auto lines = flatBallApprox(23, APPROX_OUTSIDE); int expectedLengths[13] = { 11, 11, 11, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 }; for (int i = 0; i < 13; ++i) { ASSERT_EQ(lines[i].length, expectedLengths[i]); } } }
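Both test files above exercise flatBallApprox, which describes a ball-shaped structuring element as 13 discrete lines, each given by an integer step vector and a length. As a rough mental model only (the real gpho types and semantics may differ), such a line can be expanded into the offsets it covers by repeating its step vector; the Int3, LineSpec, and line_offsets names below are stand-ins invented for this sketch.

#include <cstdio>
#include <vector>

struct Int3 { int x, y, z; };
struct LineSpec { int length; Int3 step; };

// Enumerate the integer offsets covered by a discrete line structuring element:
// the origin plus (length - 1) repetitions of the step vector.
std::vector<Int3> line_offsets(const LineSpec& line) {
    std::vector<Int3> offsets;
    Int3 p = {0, 0, 0};
    for (int i = 0; i < line.length; ++i) {
        offsets.push_back(p);
        p = {p.x + line.step.x, p.y + line.step.y, p.z + line.step.z};
    }
    return offsets;
}

int main() {
    LineSpec line = {3, {1, 1, 0}};  // mirrors one {length, step} entry checked above
    for (const Int3& o : line_offsets(line)) {
        std::printf("(%d, %d, %d)\n", o.x, o.y, o.z);
    }
    return 0;
}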
69310691d14bc120180217b0833df52574fc245b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016 Google Inc, NYU. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <TH.h> #include <THH.h> #include <luaT.h> #include <assert.h> #include <hipsparse.h> #include <rocblas.h> #include <float.h> #include <algorithm> #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" #include "THHReduceApplyUtils.cuh" #include "generic/advect_type.h" #include "third_party/cell_type.h" #include "third_party/grid.cu.h" #include "generic/int3.cu.h" #include "generic/vec3.cu.h" // The PCG code also does some processing on the CPU, and so we need the // headers for grid, vec3, etc. #define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME) #define torch_Tensor TH_CONCAT_STRING_3(torch., Real, Tensor) #define tfluids_(NAME) TH_CONCAT_3(tfluids_, Real, NAME) #define real float #define accreal double #define Real Float #define THInf FLT_MAX #define TH_REAL_IS_FLOAT #include "generic/vec3.h" #include "third_party/grid.h" #include "generic/find_connected_fluid_components.h" #undef accreal #undef real #undef Real #undef THInf #undef TH_REAL_IS_FLOAT #include "generic/calc_line_trace.cu" const int threads_per_block = 512; // Might need 256 for old SM. const int64_t cuda_num_threads = 1024; // Might need 256 for old SM. // This is REALLY ugly. But unfortunately cutorch_getstate() in // cutorch/torch/util.h is not exposed externally. We could call // cutorch.getState() from lua and pass in the struct into all the tfluids c // functions (as Soumith did with nn and cunn), but I think this is also just // as ugly. Instead lets just redefine cutorch_getstate and hope nothing // breaks :-( struct THCState* cutorch_getstate(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "_state"); struct THCState* state = reinterpret_cast<THCState*>(lua_touserdata(L, -1)); lua_pop(L, 2); return state; } // ***************************************************************************** // LaunchKernel // ***************************************************************************** // A simple helper function to reduce the amount of boiler plate code required // to launch a kernel (it also cuts down the number of potential bugs). // // All our kernels use an unknown number of parameters, so we'll need to // pass in a function pointer with the correct signature as well as the // arg lists. // // @template TFuncPtr: kernel func ptr. The compiler will autocomplete this! // @template Args: Again, you do not need to define it (see emptyDomain). // @param: func - the kernel function to call. // @param: <x>size - The size of the domain that the kernel will be launched // over. This MUST match the domain used in GetKernelIndices. // @param: args - the variable size argument list that the kernel takes as // input. template <typename TFuncPtr, typename... 
Args> // C++11 varadic function static void LaunchKernel(lua_State* L, TFuncPtr func, const int bsize, const int csize, const int zsize, const int ysize, const int xsize, Args... args) { THCState* state = cutorch_getstate(L); // Create the kernel grid and block sizes. // TODO(tompson): What if csize is 1 (i.e. scalar domains). Is this slower? int nplane = xsize * ysize * zsize; dim3 grid_size(THCCeilDiv(nplane, threads_per_block), csize, bsize); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); // Call the function. hipLaunchKernelGGL(( func), dim3(grid_size), dim3(block_size), 0, THCState_getCurrentStream(state), args...); } // Same as above, but on a one of our Grid objects. template <typename TFuncPtr, typename... Args> // C++11 varadic function static void LaunchKernel(lua_State* L, TFuncPtr func, const CudaGridBase& domain, Args... args) { THCState* state = cutorch_getstate(L); const int xsize = domain.xsize(); const int ysize = domain.ysize(); const int zsize = domain.zsize(); const int csize = domain.nchan(); const int bsize = domain.nbatch(); // Create the kernel grid and block sizes. // TODO(tompson): What if csize is 1 (i.e. scalar domains). Is this slower? int nplane = xsize * ysize * zsize; dim3 grid_size(THCCeilDiv(nplane, threads_per_block), csize, bsize); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); // Call the function. hipLaunchKernelGGL(( func), dim3(grid_size), dim3(block_size), 0, THCState_getCurrentStream(state), args...); THCudaCheck(hipGetLastError()); } inline int64_t GetBlocks(const int64_t n) { return (n + cuda_num_threads - 1) / cuda_num_threads; } // This method will launch a kernel over the entire domain numel. template <typename TFuncPtr, typename... Args> // C++11 varadic function static void LaunchKernelLoop(lua_State* L, TFuncPtr func, const CudaGridBase& domain, Args... args) { THCState* state = cutorch_getstate(L); // Call the function. // const int64_t numel = THCudaTensor_nElement(state, domain); const int64_t numel = domain.numel(); hipLaunchKernelGGL(( func), dim3(GetBlocks(numel)), dim3(cuda_num_threads), 0, THCState_getCurrentStream(state), args...); THCudaCheck(hipGetLastError()); } // Assumes you're iterating over a scalar domain (i.e nchan = 1 for the domain // you're iterating over). The LaunchKernelLoop forces this since you cannot // specify a nchan. __device__ __forceinline__ void PntIdToScalarIndices( const int32_t nbatch, const int32_t zsize, const int32_t ysize, const int32_t xsize, const int32_t& pnt_id, int32_t& batch, int32_t& k, int32_t& j, int32_t& i) { i = pnt_id % xsize; j = (pnt_id / xsize) % ysize; k = (pnt_id / xsize / ysize) % zsize; batch = (pnt_id / xsize / ysize / zsize); } // CUDA: grid stride looping. // This strategy comes from similar code in the cunn library. #define CUDA_KERNEL_LOOP(numel, pnt_id) \ for (int32_t pnt_id = blockIdx.x * blockDim.x + threadIdx.x; \ pnt_id < (numel); \ pnt_id += blockDim.x * gridDim.x) // ***************************************************************************** // GetKernelIndices // ***************************************************************************** // Another helper function to get back the batch, chan, k, j, i indices in a // kernel launch by the LaunchKernel function above. // // If GetKernelIndices returns true, then the current kernel is out of the // domain (and so you should just exist the kernel). This happens because // the tensor may not fill up the last grid. 
// // Note, you should ALWAYS pass in the same sizes as the tensor you used // to call the kernel in LaunchKernel's domain parameter. __device__ __forceinline__ bool GetKernelIndices( const int32_t bsize, const int32_t csize, const int32_t zsize, const int32_t ysize, const int32_t xsize, int32_t& batch, int32_t& chan, int32_t& k, int32_t& j, int32_t& i) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; chan = blockIdx.y; batch = blockIdx.z; if (pnt_id >= zsize * ysize * xsize) { return true; } i = pnt_id % xsize; j = (pnt_id / xsize) % ysize; k = pnt_id / (xsize * ysize); return false; } // Same as above but on one of our Grid objects. __device__ __forceinline__ bool GetKernelIndices( const CudaGridBase& domain, int32_t& batch, int32_t& chan, int32_t& k, int32_t& j, int32_t& i) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; chan = blockIdx.y; batch = blockIdx.z; if (pnt_id >= (domain.zsize() * domain.ysize() * domain.xsize())) { return true; } i = pnt_id % domain.xsize(); j = (pnt_id / domain.xsize()) % domain.ysize(); k = pnt_id / (domain.ysize() * domain.xsize()); return false; } // There are a LOT of methods in tfluids that borrow heavily (or port) parts of // Manta. These are compiled here but note that they are added under a separate // license. You should see FluidNet/torch/tfluids/third_party/README for more // information. #include "third_party/tfluids.cu" // ***************************************************************************** // velocityDivergenceBackward // ***************************************************************************** __global__ void velocityDivergenceBackward( CudaFlagGrid flags, CudaMACGrid grad_u, CudaRealGrid grad_output, const int32_t bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { // Manta zeros stuff on the border in the forward pass, so they do // not contribute gradient. return; } if (!flags.isFluid(i, j, k, b)) { // Blocked cells don't contribute gradient. return; } // TODO(tompson): I'm sure these atomic add calls are slow! We should // probably change this from a scatter to a gather op to avoid having to use // them at all. // (NVIDIA state that atomic operations on global memory are extremely slow) // but on shared memory it is OK. So we could copy to shared first, use // atomic ops there then use a small number of atomic ops back to global mem // (probably rewriting it as a gather would be easier). const float go = grad_output(i, j, k, b); atomicAdd(&grad_u(i, j, k, 0, b), go); atomicAdd(&grad_u(i + 1, j, k, 0, b), -go); atomicAdd(&grad_u(i, j, k, 1, b), go); atomicAdd(&grad_u(i, j + 1, k, 1, b), -go); if (flags.is_3d()) { atomicAdd(&grad_u(i, j, k, 2, b), go); atomicAdd(&grad_u(i, j, k + 1, 2, b), -go); } } static int tfluids_CudaMain_velocityDivergenceBackward(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. 
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 4)); THCudaTensor* tensor_grad_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 5, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaMACGrid grad_u = toCudaMACGrid(state, tensor_grad_u, is_3d); CudaRealGrid grad_output = toCudaRealGrid(state, tensor_grad_output, is_3d); // Firstly, we're going to accumulate gradient contributions, so set // grad_u to 0. THCudaTensor_zero(state, tensor_grad_u); // LaunchKernel args: lua_State, func, domain, args... const int32_t bnd = 1; LaunchKernel(L, &velocityDivergenceBackward, flags, flags, grad_u, grad_output, bnd); return 0; // Recall: number of return values on the lua stack. } // ***************************************************************************** // emptyDomain // ***************************************************************************** __global__ void emptyDomainLoop( CudaFlagGrid flags, const bool is_3d, const int32_t bnd, const int32_t nbatch, const int32_t zsize, const int32_t ysize, const int32_t xsize, const int32_t numel) { int32_t b, k, j, i; CUDA_KERNEL_LOOP(numel, pnt_id) { PntIdToScalarIndices(nbatch, zsize, ysize, xsize, pnt_id, b, k, j, i); if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (is_3d && (k < bnd || k > flags.zsize() - 1 - bnd))) { flags(i, j, k, b) = TypeObstacle; } else { flags(i, j, k, b) = TypeFluid; } } } __global__ void emptyDomain( CudaFlagGrid flags, const bool is_3d, const int32_t bnd) { int32_t b, dim, k, j, i; if (GetKernelIndices(flags, b, dim, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (is_3d && (k < bnd || k > flags.zsize() - 1 - bnd))) { flags(i, j, k, b) = TypeObstacle; } else { flags(i, j, k, b) = TypeFluid; } } static int tfluids_CudaMain_emptyDomain(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 2)); const int32_t bnd = static_cast<int32_t>(lua_tointeger(L, 3)); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); // LaunchKernel args: lua_State, func, domain, args... // Looped version - Actually not really any faster.. // LaunchKernelLoop(L, &emptyDomainLoop, flags, // flags, is_3d, bnd, flags.nbatch(), flags.zsize(), // flags.ysize(), flags.xsize(), flags.numel()); LaunchKernel(L, &emptyDomain, flags, flags, is_3d, bnd); return 0; } // ***************************************************************************** // flagsToOccupancy // ***************************************************************************** __global__ void flagsToOccupancy(CudaFlagGrid flags, CudaFlagGrid occupancy) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } float val; if (flags.isFluid(i, j, k, b)) { val = 0; } else if (flags.isObstacle(i, j, k, b)) { val = 1; } else { val = -1; // Can't throw error in kernel. Set to -1 and check min. 
} occupancy(i, j, k, b) = val; } static int tfluids_CudaMain_flagsToOccupancy(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_occupancy = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); // Normally, we would pass this in, but actually it doesn't make a difference // to the calculation. const bool is_3d = tensor_flags->size[2] > 1; CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaFlagGrid occupancy = toCudaFlagGrid(state, tensor_occupancy, is_3d); // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &flagsToOccupancy, flags, flags, occupancy); // We could be pedantic and check that the occupancy grid is OK. But this // reduction is very expensive on GPU. // if (THCudaTensor_minall(state, tensor_occupancy) < 0) { // luaL_error(L, "ERROR: unsupported flag cell found!"); // } return 0; } // ***************************************************************************** // velocityUpdateBackward // ***************************************************************************** __global__ void velocityUpdateBackward( CudaFlagGrid flags, CudaMACGrid grad_output, CudaRealGrid grad_p, const int32_t bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { // Manta zeros stuff on the border in the forward pass, so they do // not contribute gradient. return; } const CudaVec3 go(grad_output(i, j, k, b)); // TODO(tompson): I'm sure these atomic add calls are slow! We should // probably change this from a scatter to a gather op to avoid having to use // them at all. // (NVIDIA state that atomic operations on global memory are extremely slow) // but on shared memory it is OK. So we could copy to shared first, use // atomic ops there then use a small number of atomic ops back to global mem // (probably rewriting it as a gather would be easier). if (flags.isFluid(i, j, k, b)) { if (flags.isFluid(i - 1, j, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.x); atomicAdd(&grad_p(i - 1, j, k, b), go.x); } if (flags.isFluid(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.y); atomicAdd(&grad_p(i, j - 1, k, b), go.y); } if (flags.is_3d() && flags.isFluid(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k, b), -go.z); atomicAdd(&grad_p(i, j, k - 1, b), go.z); } if (flags.isEmpty(i - 1, j, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.x); } if (flags.isEmpty(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.y); } if (flags.is_3d() && flags.isEmpty(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k, b), -go.z); } } else if (flags.isEmpty(i, j, k, b) && !flags.isOutflow(i, j, k, b)) { // don't change velocities in outflow cells if (flags.isFluid(i - 1, j, k, b)) { atomicAdd(&grad_p(i - 1, j, k, b), go.x); } else { // Output doesn't depend on p, so gradient is zero and so doesn't // contribute. } if (flags.isFluid(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j - 1, k, b), go.y); } else { // Output doesn't depend on p, so gradient is zero and so doesn't // contribute. } if (flags.is_3d()) { if (flags.isFluid(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k - 1, b), go.z); } else { // Output doesn't depend on p, so gradient is zero and so // doesn't contribute. 
} } } } static int tfluids_CudaMain_velocityUpdateBackward(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* tensor_grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 5)); THCudaTensor* tensor_grad_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 6, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaMACGrid grad_output = toCudaMACGrid(state, tensor_grad_output, is_3d); CudaRealGrid grad_p = toCudaRealGrid(state, tensor_grad_p, is_3d); // Firstly, we're going to accumulate gradient contributions, so set // grad_p to 0. THCudaTensor_zero(state, tensor_grad_p); const int32_t bnd = 1; // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &velocityUpdateBackward, flags, flags, grad_output, grad_p, bnd); return 0; // Recall: number of return values on the lua stack. } // ***************************************************************************** // volumetricUpsamplingNearestForward // ***************************************************************************** __global__ void volumetricUpSamplingNearestForward( const int ratio, THCDeviceTensor<float, 5> in, THCDeviceTensor<float, 5> out) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; const int chan = blockIdx.y; const int batch = blockIdx.z; if (pnt_id >= (out.getSize(2) * out.getSize(3) * out.getSize(4))) { return; } const int x = pnt_id % out.getSize(4); const int y = (pnt_id / out.getSize(4)) % out.getSize(3); const int z = pnt_id / (out.getSize(3) * out.getSize(4)); const int xin = x / ratio; const int yin = y / ratio; const int zin = z / ratio; const float inVal = in[batch][chan][zin][yin][xin]; out[batch][chan][z][y][x] = inVal; } static int tfluids_CudaMain_volumetricUpSamplingNearestForward(lua_State* L) { THCState* state = cutorch_getstate(L); const int32_t ratio = static_cast<int32_t>(lua_tointeger(L, 1)); THCudaTensor* input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); if (input->nDimension != 5 || output->nDimension != 5) { luaL_error(L, "ERROR: input and output must be dim 5"); } const int32_t nbatch = input->size[0]; const int32_t nfeat = input->size[1]; const int32_t zdim = input->size[2]; const int32_t ydim = input->size[3]; const int32_t xdim = input->size[4]; if (output->size[0] != nbatch || output->size[1] != nfeat || output->size[2] != zdim * ratio || output->size[3] != ydim * ratio || output->size[4] != xdim * ratio) { luaL_error(L, "ERROR: input : output size mismatch."); } THCDeviceTensor<float, 5> dev_in = toDeviceTensor<float, 5>(state, input); THCDeviceTensor<float, 5> dev_out = toDeviceTensor<float, 5>(state, output); if (!THCudaTensor_isContiguous(state, input)) { luaL_error(L, "ERROR: input must be contiguous"); } if (!THCudaTensor_isContiguous(state, output)) { luaL_error(L, "ERROR: output must be contiguous"); } // One thread per output element. 
int nplane = dev_out.getSize(2) * dev_out.getSize(3) * dev_out.getSize(4); dim3 grid_size(THCCeilDiv(nplane, threads_per_block), dev_out.getSize(1), dev_out.getSize(0)); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); hipLaunchKernelGGL(( volumetricUpSamplingNearestForward), dim3(grid_size), dim3(block_size), 0, THCState_getCurrentStream(state), ratio, dev_in, dev_out); return 0; } // ***************************************************************************** // volumetricUpsamplingNearestBackward // ***************************************************************************** __global__ void volumetricUpSamplingNearestBackward( const int ratio, THCDeviceTensor<float, 5> grad_out, THCDeviceTensor<float, 5> grad_in) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; const int chan = blockIdx.y; const int batch = blockIdx.z; if (pnt_id >= (grad_in.getSize(2) * grad_in.getSize(3) * grad_in.getSize(4))) { return; } const int x = pnt_id % grad_in.getSize(4); const int y = (pnt_id / grad_in.getSize(4)) % grad_in.getSize(3); const int z = pnt_id / (grad_in.getSize(3) * grad_in.getSize(4)); float sum = 0.0f; // Now accumulate gradients from the upsampling window. for (int32_t zup = 0; zup < ratio; zup++) { for (int32_t yup = 0; yup < ratio; yup++) { for (int32_t xup = 0; xup < ratio; xup++) { const int xin = x * ratio + xup; const int yin = y * ratio + yup; const int zin = z * ratio + zup; const float val = grad_out[batch][chan][zin][yin][xin]; sum += val; } } } grad_in[batch][chan][z][y][x] = sum; } static int tfluids_CudaMain_volumetricUpSamplingNearestBackward(lua_State* L) { THCState* state = cutorch_getstate(L); const int32_t ratio = static_cast<int32_t>(lua_tointeger(L, 1)); THCudaTensor* input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* grad_input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); if (input->nDimension != 5 || grad_output->nDimension != 5 || grad_input->nDimension != 5) { luaL_error(L, "ERROR: input, gradOutput and gradInput must be dim 5"); } const int32_t nbatch = input->size[0]; const int32_t nfeat = input->size[1]; const int32_t zdim = input->size[2]; const int32_t ydim = input->size[3]; const int32_t xdim = input->size[4]; if (grad_output->size[0] != nbatch || grad_output->size[1] != nfeat || grad_output->size[2] != zdim * ratio || grad_output->size[3] != ydim * ratio || grad_output->size[4] != xdim * ratio) { luaL_error(L, "ERROR: input : gradOutput size mismatch."); } if (grad_input->size[0] != nbatch || grad_input->size[1] != nfeat || grad_input->size[2] != zdim || grad_input->size[3] != ydim || grad_input->size[4] != xdim) { luaL_error(L, "ERROR: input : gradInput size mismatch."); } THCDeviceTensor<float, 5> dev_in = toDeviceTensor<float, 5>(state, input); THCDeviceTensor<float, 5> dev_grad_out = toDeviceTensor<float, 5>( state, grad_output); THCDeviceTensor<float, 5> dev_grad_in = toDeviceTensor<float, 5>( state, grad_input); if (!THCudaTensor_isContiguous(state, input)) { luaL_error(L, "ERROR: input must be contiguous"); } if (!THCudaTensor_isContiguous(state, grad_output)) { luaL_error(L, "ERROR: gradOutput must be contiguous"); } if (!THCudaTensor_isContiguous(state, grad_input)) { luaL_error(L, "ERROR: gradInput must be contiguous"); } // One thread per grad_input element. // TODO(tompson): This is slow. Switch to a looping kernel. 
int nplane = dev_grad_in.getSize(2) * dev_grad_in.getSize(3) * dev_grad_in.getSize(4); dim3 grid_size(THCCeilDiv(nplane, threads_per_block), dev_grad_in.getSize(1), dev_grad_in.getSize(0)); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); hipLaunchKernelGGL(( volumetricUpSamplingNearestBackward), dim3(grid_size), dim3(block_size), 0, THCState_getCurrentStream(state), ratio, dev_grad_out, dev_grad_in); return 0; } // ***************************************************************************** // signedDistanceField // ***************************************************************************** __global__ void signedDistanceField( CudaFlagGrid flags, const int search_rad, CudaRealGrid dst) { int b, chan, z, y, x; if (GetKernelIndices(flags, b, chan, z, y, x)) { return; } if (flags.isObstacle(x, y, z, b)) { dst(x, y, z, b) = 0; } float dist_sq = static_cast<float>(search_rad * search_rad); const int zmin = max(0, z - search_rad);; const int zmax = min((int)flags.zsize() - 1, z + search_rad); const int ymin = max(0, y - search_rad);; const int ymax = min((int)flags.ysize() - 1, y + search_rad); const int xmin = max(0, x - search_rad);; const int xmax = min((int)flags.xsize() - 1, x + search_rad); for (int zsearch = zmin; zsearch <= zmax; zsearch++) { for (int ysearch = ymin; ysearch <= ymax; ysearch++) { for (int xsearch = xmin; xsearch <= xmax; xsearch++) { if (flags.isObstacle(xsearch, ysearch, zsearch, b)) { const float cur_dist_sq = ((z - zsearch) * (z - zsearch) + (y - ysearch) * (y - ysearch) + (x - xsearch) * (x - xsearch)); if (dist_sq > cur_dist_sq) { dist_sq = cur_dist_sq; } } } } } dst(x, y, z, b) = sqrt(dist_sq); } static int tfluids_CudaMain_signedDistanceField(lua_State *L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); const int32_t search_rad = static_cast<int32_t>(lua_tointeger(L, 2)); const bool is_3d = static_cast<bool>(lua_toboolean(L, 3)); THCudaTensor* tensor_dst = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaRealGrid dst = toCudaRealGrid(state, tensor_dst, is_3d); // LaunchKernel args: lua_State, func, domain, args... 
LaunchKernel(L, &signedDistanceField, flags, flags, search_rad, dst); return 0; } //****************************************************************************** // solveLinearSystemPCG //****************************************************************************** static hipblasHandle_t cublas_handle = 0; static void init_cublas() { if (cublas_handle == 0) { hipblasStatus_t status = hipblasCreate(&cublas_handle); if (status != HIPBLAS_STATUS_SUCCESS) { THError("CUBLAS Library initialization failed"); } } } static hipsparseHandle_t cusparse_handle = 0; static void init_cusparse() { if (cusparse_handle == 0) { hipsparseStatus_t status = hipsparseCreate(&cusparse_handle); if (status != HIPSPARSE_STATUS_SUCCESS) { THError("CUSPARSE Library initialization failed"); } } } // Method from: // stackoverflow.com/questions/30454089/solving-sparse-definite-positive-linear-systems-in-cuda // NOLINT static const char* cusparseGetStatusString(hipsparseStatus_t status) { switch (status) { case HIPSPARSE_STATUS_SUCCESS: return "HIPSPARSE_STATUS_SUCCESS"; case HIPSPARSE_STATUS_NOT_INITIALIZED: return "HIPSPARSE_STATUS_NOT_INITIALIZED"; case HIPSPARSE_STATUS_ALLOC_FAILED: return "HIPSPARSE_STATUS_ALLOC_FAILED"; case HIPSPARSE_STATUS_INVALID_VALUE: return "HIPSPARSE_STATUS_INVALID_VALUE"; case HIPSPARSE_STATUS_ARCH_MISMATCH: return "HIPSPARSE_STATUS_ARCH_MISMATCH"; case HIPSPARSE_STATUS_MAPPING_ERROR: return "HIPSPARSE_STATUS_MAPPING_ERROR"; case HIPSPARSE_STATUS_EXECUTION_FAILED: return "HIPSPARSE_STATUS_EXECUTION_FAILED"; case HIPSPARSE_STATUS_INTERNAL_ERROR: return "HIPSPARSE_STATUS_INTERNAL_ERROR"; case HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "HIPSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case HIPSPARSE_STATUS_ZERO_PIVOT: return "HIPSPARSE_STATUS_ZERO_PIVOT"; default: return "<unknown cusparse error>"; } } #define CHECK_CUSPARSE(expr) checkCusparseStatus((expr), __FILE__, __LINE__) void checkCusparseStatus(hipsparseStatus_t stat, char const * file, int line) { if (stat != HIPSPARSE_STATUS_SUCCESS) { std::cout << "CUSPARSE error in file '" << file << "', line " << line << ": error(" << stat << "): " << cusparseGetStatusString(stat) << std::endl; } THCudaCheck(hipGetLastError()); // Sometimes, torch's cuda handle wont catch the error but cusparse enum // is bad. If that's the case, hard fail here. 
if (stat != HIPSPARSE_STATUS_SUCCESS) { THError("CUSPARSE error"); exit(-1); } } static const char* cublasGetStatusString(hipblasStatus_t status) { switch (status) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; default: return "<unknown cublas error>"; } } #define CHECK_CUBLAS(expr) checkCublasStatus((expr), __FILE__, __LINE__) void checkCublasStatus(hipblasStatus_t stat, char const * file, int line) { if (stat != HIPBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS error in file '" << file << "', line " << line << ": error(" << stat << "): " << cublasGetStatusString(stat) << std::endl; } THCudaCheck(hipGetLastError()); // Sometimes, torch's cuda handle wont catch the error but cusparse enum // is bad. If that's the case, hard fail here. if (stat != HIPBLAS_STATUS_SUCCESS) { THError("CUBLAS error"); exit(-1); } } // These macros require that state be defined, which you can get by calling // cutorch_getstate. #define DEV_PTR(tensor) THCudaTensor_data(state, tensor) #define DEV_INT_PTR(tensor) THCudaIntTensor_data(state, tensor) int64_t createReducedSystemIndices( const tfluids_FloatFlagGrid& flags, THIntTensor* components, THIntTensor* indices, const int32_t ibatch, const int32_t icomponent) { if (indices->nDimension != 3) { THError("indices must be 3D"); } if (components->nDimension != 3) { THError("components must be 3D"); } int64_t cur_index = 0; const int32_t zsize = flags.zsize(); const int32_t ysize = flags.ysize(); const int32_t xsize = flags.xsize(); if ((indices->size[0] != zsize || indices->size[1] != ysize || indices->size[2] != xsize)) { THError("indices must be the same dimension as flags (non-batched)"); } for (int32_t k = 0; k < zsize; k++) { for (int32_t j = 0; j < ysize; j++) { for (int32_t i = 0; i < xsize; i++) { if (THIntTensor_get3d(components, k, j, i) == icomponent) { // Note, if it's part of the connected component of fluid cells then // we don't have to check if it's fluid. However we should do anyway // just to make sure. if (!flags.isFluid(i, j, k, ibatch)) { THError("A non fluid component was found!"); } THIntTensor_set3d(indices, k, j, i, cur_index); cur_index++; } else { THIntTensor_set3d(indices, k, j, i, -1); } } } } return cur_index; } // @param I, col, and val: the output CSR formatted sparse matrix. // TOTO(tompson): This is super slow. All the _get and _set methods do bounds // checking. Once everything is working, switch to raw ptrs. static int64_t setupLaplacian( const tfluids_FloatFlagGrid& flags, const int32_t b, THIntTensor* row, THIntTensor* col, THFloatTensor *val, const bool upper_triangular, THIntTensor* system_indices, THIntTensor* components, const int icomponent) { // row stores the indices of the first non-zero item in the col and val // arrays. (i.e. col[row[n]] is the column index of the 1st element of row n. // and val[row[4]] is the corresponding value. // The number of non-zero values in each row is given by row[n + 1] - row[n]. // Hence the need to have (dim + 1) row values. 
int64_t current_row = 0; int64_t val_index = 0; THIntTensor_set1d(row, current_row, 0); // 0th row starts at 0th index. // TODO(tompson): Parallelize this. const int32_t zsize = flags.zsize(); const int32_t ysize = flags.ysize(); const int32_t xsize = flags.xsize(); const int32_t bnd = 1; for (int32_t k = 0; k < zsize; k++) { for (int32_t j = 0; j < ysize; j++) { for (int32_t i = 0; i < xsize; i++) { if (THIntTensor_get3d(components, k, j, i) != icomponent) { // Not part of the current connected component. // Make sure the current cell wasn't assigned an index in the output // system. if (THIntTensor_get3d(system_indices, k, j, i) != -1) { THError("Non fluid cell shouldn't have an index!"); } continue; } const bool out_of_bounds = (i < bnd || i > xsize - 1 - bnd || j < bnd || j > ysize - 1 - bnd || (flags.is_3d() && (k < bnd || k > zsize - 1 - bnd))); // As per Manta's convention, the border are all obstacle cells. // Therefore their divergence (rhs) is zero. AND the do not contribute // non-zero elements to the sparse matrix. As such, we just skip // over them. // Technically the isFluid check here is completely redundant (since // it's part of a component), but lets do it anyway for clarity). if (!out_of_bounds && flags.isFluid(i, j, k, b)) { // Collect the diagonal term first. The diag term is the sum of // NON obstacle cells. In most cases this is the same as fluid cells, // but empty cells also contribute flow. float val_diagonal = 0.0f; if (!flags.isObstacle(i - 1, j, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i + 1, j, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i, j - 1, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i, j + 1, k, b)) { val_diagonal += 1; } if (flags.is_3d() && !flags.isObstacle(i, j, k - 1, b)) { val_diagonal += 1; } if (flags.is_3d() && !flags.isObstacle(i, j, k + 1, b)) { val_diagonal += 1; } // Off diagonal entries. float im1jk = 0.0f; if (!upper_triangular && flags.isFluid(i - 1, j, k, b)) { im1jk = -1.0f; // Off diagonal entry for fluid neighbors is -1. } float ip1jk = 0.0f; if (flags.isFluid(i + 1, j, k, b)) { ip1jk = -1.0f; } float ijm1k = 0.0f; if (!upper_triangular && flags.isFluid(i, j - 1, k, b)) { ijm1k = -1.0f; } float ijp1k = 0.0f; if (flags.isFluid(i, j + 1, k, b)) { ijp1k = -1.0f; } float ijkm1 = 0.0f; float ijkp1 = 0.0f; if (flags.is_3d()) { if (!upper_triangular && flags.isFluid(i, j, k - 1, b)) { ijkm1 = -1.0f; } if (flags.isFluid(i, j, k + 1, b)) { ijkp1 = -1.0f; } } // Set the matrix values now. Setting values in increasing index // order as it is done this way by the denseToCSR. // Also every example I have seen does it this way. if (ijkm1 != 0.0f) { // We can't just use the flat index (x + (y * w) + (z * w * h)) // as the column index because we're operating on a reduced system. // Therefore we need to look up the system_index. 
const int isys = THIntTensor_get3d(system_indices, k - 1, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijkm1); THIntTensor_set1d(col, val_index, isys); val_index++; // increment the val and col place } if (ijm1k != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j - 1, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijm1k); THIntTensor_set1d(col, val_index, isys); val_index++; } if (im1jk != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j, i - 1); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, im1jk); THIntTensor_set1d(col, val_index, isys); val_index++; } { // For scoping of isys. const int isys = THIntTensor_get3d(system_indices, k, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, val_diagonal); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ip1jk != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j, i + 1); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ip1jk); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ijp1k != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j + 1, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijp1k); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ijkp1 != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k + 1, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijkp1); THIntTensor_set1d(col, val_index, isys); val_index++; } current_row++; THIntTensor_set1d(row, current_row, val_index); } else { // isFluid & inBounds // We shouldn't have got here. All cells in a component should be // fluid cells. std::cout << "Non fluid cell found in a connected component or " << "fluid cell found on the domain border:" << " flags(i, j, k, b) = " << flags(i, j, k, b) << std::endl; // TODO(tompson): Manta always has 1 solid component on the border, // but should we allow it? THError("Non fluid cell found in a connected component"); } } } } return val_index; // Return number of non-zero entries in the matrix A. } // allocTempTensor expects a lua table on the stack in index 1, that we will // store a bunch of temporary tensors. We will allocate these on demand, i.e. // we will return the existing tensors if they exist, else we will create a new // one. template <typename TensorType> TensorType* allocTempTensor(lua_State* L, const char* name, const char* typeStr, TensorType* (*newFunc)()) { TensorType* tensor = nullptr; luaL_checktype(L, 1, LUA_TTABLE); lua_getfield(L, 1, name); // Stack now has: // 2, 3, 4, ...: Rest of args to c function. // 1: tfluids._tmpPCG // -1: tfluids._tmpPCG[name] if (lua_isnil(L, -1)) { lua_pop(L, 1); // Pop the nil. // Create a new tensor. tensor = newFunc(); // Push the new tensor into the table. lua_pushstring(L, name); luaT_pushudata(L, (void *)tensor, typeStr); lua_settable(L, 1); // Note: pops both key and value. } else { // Get the value. tensor = reinterpret_cast<TensorType*>(luaT_checkudata(L, -1, typeStr)); // Pop the tensor from the stack. lua_pop(L, 1); } return tensor; } // allocTempCudaTensor is the same as above, except annoyingly the 'new' func // signature is differnt and there's no easy way to template the function ptr // without wrapping it with a static number of arguments. 
It's easier just // to define two methods, even though it's replicated code. template <typename TensorType> TensorType* allocTempCudaTensor(lua_State* L, const char* name, const char* typeStr, TensorType* (*newFunc)(THCState*)) { TensorType* tensor = nullptr; luaL_checktype(L, 1, LUA_TTABLE); lua_getfield(L, 1, name); if (lua_isnil(L, -1)) { lua_pop(L, 1); tensor = newFunc(cutorch_getstate(L)); lua_pushstring(L, name); luaT_pushudata(L, (void *)tensor, typeStr); lua_settable(L, 1); } else { tensor = reinterpret_cast<TensorType*>(luaT_checkudata(L, -1, typeStr)); lua_pop(L, 1); } return tensor; } // calpToEpsilon clamps to positive or negative epsilon depending on sign.. inline float clampToEpsilon(const float val, const float epsilon) { if (std::abs(val) < epsilon) { if (val < 0) { return ::min(val, -epsilon); } else { return ::max(val, epsilon); } } else { return val; } } __global__ void copyPressureFromSystem( THCDeviceTensor<int, 3> system_indices, THCDeviceTensor<float, 1> pressure_pcg, CudaRealGrid pressure, const int32_t bout, const float mean) { const int32_t xsize = system_indices.getSize(2); const int32_t ysize = system_indices.getSize(1); const int32_t zsize = system_indices.getSize(0); int32_t b, chan, k, j, i; // b and chan will always be zero (because we call this on the non-batched // tensor). if (GetKernelIndices(1, 1, zsize, ysize, xsize, b, chan, k, j, i)) { return; } // Look up the system index for the current voxel / pixel. int ind = system_indices[k][j][i]; if (ind < 0) { // This pixel wasn't in the linear system (it's a non-fluid cell). // The output pressure will be set to zero (but not here since we don't // want to overwrite a cell not on our connected component. } else { pressure(i, j, k, bout) = pressure_pcg[ind] - mean; } } __global__ void copyDivergenceToSystem( THCDeviceTensor<int, 3> system_indices, THCDeviceTensor<float, 1> div_pcg, CudaRealGrid div, const int32_t ibatch) { const int32_t xsize = system_indices.getSize(2); const int32_t ysize = system_indices.getSize(1); const int32_t zsize = system_indices.getSize(0); int32_t b, chan, k, j, i; // b and chan will always be zero (because we call this on the non-batched // tensor). if (GetKernelIndices(1, 1, zsize, ysize, xsize, b, chan, k, j, i)) { return; } // Look up the system index for the current voxel / pixel. const int ind = system_indices[k][j][i]; if (ind >= 0) { // Fluid cell (so it's in the system), copy the divergence. div_pcg[ind] = div(i, j, k, ibatch); } } typedef enum { PRECOND_NONE, PRECOND_ILU0, PRECOND_IC0, } PrecondType; PrecondType StringToPrecondType(lua_State* L, const std::string& precond_type_str) { if (precond_type_str == "none") { return PRECOND_NONE; } else if (precond_type_str == "ilu0") { return PRECOND_ILU0; } else if (precond_type_str == "ic0") { return PRECOND_IC0; } else { luaL_error(L, "precondType is not supported."); return PRECOND_NONE; } } std::string PrecondTypeToString(const PrecondType precond_type) { switch (precond_type) { case PRECOND_NONE: return "none"; case PRECOND_ILU0: return "ilu0"; case PRECOND_IC0: return "ic0"; default: THError("Incorrect precond enum type."); exit(-1); } } static int tfluids_CudaMain_solveLinearSystemPCG(lua_State* L) { init_cublas(); // No op if already initialized. init_cusparse(); // No op if already initialized. THCState* state = cutorch_getstate(L); luaL_checktype(L, 1, LUA_TTABLE); // The first argument should be a table. 
THCudaTensor* p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* flags_gpu = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* div = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 5)); const std::string precond_type_str = static_cast<std::string>(lua_tostring(L, 6)); const float tol = static_cast<float>(lua_tonumber(L, 7)); const int64_t max_iter = static_cast<int64_t>(lua_tointeger(L, 8)); const bool verbose = static_cast<bool>(lua_toboolean(L, 9)); // This PCG routine uses a LOT of temporary storage. We will create tensors // in the tfluids._tmpPCG namespace (table) that are static to the library. // This means the tensors stick around between subsequent calls and are // resized on demand. // The remaining are temporary storage allocated on the lua side but static // to the library. They stick around but are resized on demand. THIntTensor* system_indices_cpu = allocTempTensor<THIntTensor>( L, "systemIndicesCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* row_cpu = allocTempTensor<THIntTensor>( L, "rowCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* components = allocTempTensor<THIntTensor>( L, "componentsCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* col_cpu = allocTempTensor<THIntTensor>( L, "colCPU", "torch.IntTensor", &THIntTensor_new); THFloatTensor* val_cpu = allocTempTensor<THFloatTensor>( L, "valCPU", "torch.FloatTensor", &THFloatTensor_new); THFloatTensor* flags_cpu = allocTempTensor<THFloatTensor>( L, "flagsCPU", "torch.FloatTensor", &THFloatTensor_new); THCudaIntTensor* row_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "rowGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); THCudaIntTensor* col_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "colGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); THCudaTensor* val_gpu = allocTempCudaTensor<THCudaTensor>( L, "valGPU", "torch.CudaTensor", &THCudaTensor_new); // TODO(tompson): I'm not convinced we need half of these. THCudaTensor* rhs_gpu = allocTempCudaTensor<THCudaTensor>( L, "rhsGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* r_gpu = allocTempCudaTensor<THCudaTensor>( L, "rGPU", "torch.CudaTensor", &THCudaTensor_new); // residual vector THCudaTensor* val_precond_gpu = allocTempCudaTensor<THCudaTensor>( L, "valILU0GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* x_gpu = allocTempCudaTensor<THCudaTensor>( L, "xGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* d_gpu = allocTempCudaTensor<THCudaTensor>( L, "dGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* y_gpu = allocTempCudaTensor<THCudaTensor>( L, "yGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* zm1_gpu = allocTempCudaTensor<THCudaTensor>( L, "zm1GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* zm2_gpu = allocTempCudaTensor<THCudaTensor>( L, "zm2GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* p_gpu = allocTempCudaTensor<THCudaTensor>( // Search direction. 
L, "pGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* omega_gpu = allocTempCudaTensor<THCudaTensor>( L, "omegaGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* rm2_gpu = allocTempCudaTensor<THCudaTensor>( L, "rm2GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaIntTensor* system_indices_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "systemIndicesGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); // We need the FLAG grid on the CPU, because that's where we're going to // construct the sparse matrix (Laplacian). THFloatTensor_resize5d(flags_cpu, flags_gpu->size[0], flags_gpu->size[1], flags_gpu->size[2], flags_gpu->size[3], flags_gpu->size[4]); THFloatTensor_copyCuda(state, flags_cpu, flags_gpu); // flags_cpu = flags_gpu tfluids_FloatFlagGrid flags(flags_cpu, is_3d); CudaRealGrid pressure = toCudaRealGrid(state, p, is_3d); CudaRealGrid divergence = toCudaRealGrid(state, div, is_3d); // Zero the pressure everywhere, this will zero out the pressure of the // non-fluid and empty region cells (because we don't touch them during the // pressure solve). THCudaTensor_zero(state, p); const int32_t xsize = flags.xsize(); const int32_t ysize = flags.ysize(); const int32_t zsize = flags.zsize(); const int32_t nbatch = flags.nbatch(); // We wont parallelize over batches, but process each sequentially. // TODO(tompson): We could at least parallelize all the Laplacian setups // over batch. float max_residual = -std::numeric_limits<float>::infinity(); for (int32_t ibatch = 0; ibatch < nbatch; ibatch++) { // Find connected components of fluid regions. If we combine these into a // single system then it will be singular (with non-positive pivot) and ICU0 // preconditioner will fail. Bridson talks about enforcing compatibility // conditioner by adding the null-space components to the RHS, this is one // solution. Another solution is to solve M PCG problems for each of the M // components (this is what we'll do). THIntTensor_resize3d(components, zsize, ysize, xsize); std::vector<int32_t> component_sizes; std::vector<Int3> single_components; const int32_t ncomponents = findConnectedFluidComponents( flags, components, ibatch, &component_sizes); // Now solve ncomponents linear systems. for (int32_t icomponent = 0; icomponent < ncomponents; icomponent++) { PrecondType precond_type = StringToPrecondType(L, precond_type_str); if (component_sizes[icomponent] == 1) { // Single components will not have a valid solution. Leave the pressure // at zero. if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " has size 1, skipping." << std::endl; } continue; } else { if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " has size " << component_sizes[icomponent] << "." << std::endl; } if (component_sizes[icomponent] < 5) { // Don't use a preconditioner, it's slower. precond_type = PRECOND_NONE; } } if (verbose) { std::cout << "PCG: " << (ibatch + 1) << " component " << (icomponent + 1) << " using precond type " << PrecondTypeToString(precond_type) << std::endl; } // We're going to create the sparse laplacian next, but we don't want all // zero rows (caused by obstacle cells). It guarantees that A is singular. // it causes issues with the preconditioner in cusparse, and it is // inefficient. Therefore we need to scan through the dataset and create // a map of fluid cells, with indices into our new system. 
THIntTensor_resize3d(system_indices_cpu, zsize, ysize, xsize); THCudaIntTensor_resize3d(state, system_indices_gpu, zsize, ysize, xsize); const int64_t numel = createReducedSystemIndices( flags, components, system_indices_cpu, ibatch, icomponent); // While we're at it, copy these system indices to the GPU (we'll need // them later). THCudaIntTensor_copyInt(state, system_indices_gpu, system_indices_cpu); // Recall: resize ops are a no-op if the storage shrinks or stays the // same. // Note: here we'll allocate the col and val arrays to the maximum // possible size (6 neighbors + 1 diagonal = 7). This would be for a // domain of all fluid cells, where we also include the border (which like // Manta we do not) and if the border cells had neighbors outside (which // they do not). So actually this is a conservative sizing. // If this is a problem we can always do two passes, one to get the number // of non-zero values, and one to fill them (as @kristofe used to do). THIntTensor_resize1d(row_cpu, numel + 1); THIntTensor_resize1d(col_cpu, numel * 7); THFloatTensor_resize1d(val_cpu, numel * 7); const bool upper_tri = precond_type == PRECOND_IC0; const int64_t nz = setupLaplacian(flags, ibatch, row_cpu, col_cpu, val_cpu, upper_tri, system_indices_cpu, components, icomponent); if (nz > col_cpu->size[0]) { luaL_error(L, "INTERNAL ERROR: num of non-zero elements is too large!."); } // Copy the sparse matrix values to the GPU, this time we'll only allocate // the number of non-zero values needed. THCudaIntTensor_resize1d(state, row_gpu, numel + 1); THCudaIntTensor_copyInt(state, row_gpu, row_cpu); THCudaIntTensor_resize1d(state, col_gpu, nz); { // Wrap for scoping of col_cpu_nz. // Recall: newNarrow(tensor, dim, first_index, size). THIntTensor* col_cpu_nz = THIntTensor_newNarrow(col_cpu, 0, 0, nz); THCudaIntTensor_copyInt(state, col_gpu, col_cpu_nz); THIntTensor_free(col_cpu_nz); } THCudaTensor_resize1d(state, val_gpu, nz); { // Wrap for scoping of val_cpu_nz. THFloatTensor* val_cpu_nz = THFloatTensor_newNarrow(val_cpu, 0, 0, nz); THCudaTensor_copyFloat(state, val_gpu, val_cpu_nz); THFloatTensor_free(val_cpu_nz); } // Create a description in cusparse of the A matrix that we've // created (the val, row and col values above). hipsparseMatDescr_t descr = 0; CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr)); if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(hipsparseSetMatFillMode(descr, HIPSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_SYMMETRIC)); } else { CHECK_CUSPARSE(hipsparseSetMatType(descr, HIPSPARSE_MATRIX_TYPE_GENERAL)); } CHECK_CUSPARSE(hipsparseSetMatIndexBase(descr, HIPSPARSE_INDEX_BASE_ZERO)); // Also copy the rhs (aka div) to the 'b' tensor with reduced indices // and at the current batch index. THCudaTensor_resize1d(state, rhs_gpu, numel); THCDeviceTensor<int, 3> dev_inds = toDeviceTensor<int, 3>(state, system_indices_gpu); THCDeviceTensor<float, 1> dev_rhs = toDeviceTensor<float, 1>(state, rhs_gpu); LaunchKernel(L, &copyDivergenceToSystem, 1, 1, zsize, ysize, xsize, dev_inds, dev_rhs, divergence, ibatch); // Generate the Preconditioner. // Create the analysis info object for the A matrix. cusparseSolveAnalysisInfo_t info_a = 0; cusparseSolveAnalysisInfo_t info_u = 0; cusparseSolveAnalysisInfo_t info_ut = 0; // Only used by ic0. 
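      // (For reference, an illustrative CSR example -- not from the original
      // sources -- of the row/col/val layout uploaded above: the 3x3 matrix
      //   [ 2 -1  0 ]
      //   [-1  2 -1 ]
      //   [ 0 -1  2 ]
      // is stored as row = {0, 2, 5, 7}, col = {0, 1, 0, 1, 2, 1, 2} and
      // val = {2, -1, -1, 2, -1, -1, 2}; row[i + 1] - row[i] is the number of
      // non-zeros in row i.)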
hipsparseMatDescr_t descr_l = 0; hipsparseMatDescr_t descr_u = 0; if (precond_type != PRECOND_NONE) { THCudaTensor_resize1d(state, val_precond_gpu, nz); THCudaTensor_copy(state, val_precond_gpu, val_gpu); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_a)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); if (precond_type == PRECOND_ILU0) { if (verbose) { std::cout << "PCG: Generating ILU0 preconditioner." << std::endl; } // Generate the Incomplete LU factor H for the matrix A. CHECK_CUSPARSE(cusparseScsrilu0( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, descr, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); // Create info objects for the ILU0 preconditioner. CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr_l)); CHECK_CUSPARSE(hipsparseSetMatType( descr_l, HIPSPARSE_MATRIX_TYPE_GENERAL)); CHECK_CUSPARSE(hipsparseSetMatIndexBase( descr_l, HIPSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(hipsparseSetMatFillMode( descr_l, HIPSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(hipsparseSetMatDiagType( descr_l, HIPSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr_u)); CHECK_CUSPARSE(hipsparseSetMatType( descr_u, HIPSPARSE_MATRIX_TYPE_GENERAL)); CHECK_CUSPARSE(hipsparseSetMatIndexBase( descr_u, HIPSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(hipsparseSetMatFillMode( descr_u, HIPSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(hipsparseSetMatDiagType( descr_u, HIPSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u)); } else if (precond_type == PRECOND_IC0) { if (verbose) { std::cout << "PCG: Generating IC0 preconditioner." << std::endl; } CHECK_CUSPARSE(cusparseScsric0( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, descr, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); CHECK_CUSPARSE(hipsparseCreateMatDescr(&descr_u)); CHECK_CUSPARSE(hipsparseSetMatType( descr_u, HIPSPARSE_MATRIX_TYPE_TRIANGULAR)); CHECK_CUSPARSE(hipsparseSetMatIndexBase( descr_u, HIPSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(hipsparseSetMatFillMode( descr_u, HIPSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_ut)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, HIPSPARSE_OPERATION_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_ut)); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u)); } else { luaL_error(L, "Incorrect preconType ('none', 'ic0', 'ilu0')"); } } // While we're at it, set the pressure value to a zero value (note we // could also use the previous frame's pressure). This is CG's initial // guess. THCudaTensor_resize1d(state, x_gpu, numel); THCudaTensor_zero(state, x_gpu); // TODO(tompson): Move all these to the start of the function. 
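      // (Illustrative summary, not from the original comments: inside the loop
      // below the preconditioner built above is applied by solving
      // M * z_k = r_k. For ILU0, M ~= L * U, so we solve L * y = r then
      // U * z = y; for IC0, M ~= R^T * R with R upper triangular, so we solve
      // R^T * y = r then R * z = y.)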
THCudaTensor_resize1d(state, y_gpu, numel); THCudaTensor_resize1d(state, p_gpu, numel); THCudaTensor_resize1d(state, omega_gpu, numel); THCudaTensor_resize1d(state, zm1_gpu, numel); THCudaTensor_resize1d(state, zm2_gpu, numel); THCudaTensor_resize1d(state, rm2_gpu, numel); // TODO(tompson): Do we need yet another copy of the RHS? // The algorithm we're implementing here is from Matrix Computations, // Golub and Van Loan, Algorithm 10.3.1: int32_t iter = 0; float r_norm_sq1; // r_norm_sq1 is the current residual float r_norm_sq0; // r_norm_sq0 is the previous iteration's residual // Since we start with x = 0, the initial residual is just the norm of the // rhs (i.e. residual = ||rhs - A * x|| = ||rhs||) THCudaTensor_resize1d(state, r_gpu, numel); // residual THCudaTensor_copy(state, r_gpu, rhs_gpu); CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(r_gpu), 1, &r_norm_sq1)); if (isnan(r_norm_sq1)) { luaL_error(L, "PCG Error: starting residual is nan!"); } const float one = 1.0f; const float zero = 0.0f; float numerator; float denominator; float beta; float alpha; float nalpha; if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << ": starting residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; } // epsilon ~= 1e-38 (just prevents divide by zero). const float epsilon = std::numeric_limits<float>::min(); while (r_norm_sq1 > tol * tol && iter <= max_iter) { if (precond_type == PRECOND_ILU0) { // Solve M * z_k = r_k CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_l, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a, DEV_PTR(r_gpu), DEV_PTR(y_gpu))); CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u, DEV_PTR(y_gpu), DEV_PTR(zm1_gpu))); } else if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, HIPSPARSE_OPERATION_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_ut, DEV_PTR(r_gpu), DEV_PTR(y_gpu))); CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u, DEV_PTR(y_gpu), DEV_PTR(zm1_gpu))); } iter++; // k = k + 1 // Calculate the next search direction p_k. 
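      // (Illustrative recap of the recurrences implemented below, matching
      // Golub & Van Loan Alg. 10.3.1:
      //   beta_k  = (r_{k-1}^T z_{k-1}) / (r_{k-2}^T z_{k-2})
      //   p_k     = z_{k-1} + beta_k * p_{k-1}
      //   alpha_k = (r_{k-1}^T z_{k-1}) / (p_k^T A p_k)
      //   x_k     = x_{k-1} + alpha_k * p_k
      //   r_k     = r_{k-1} - alpha_k * A * p_k
      // With no preconditioner, z is simply r.)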
if (iter == 1) { if (precond_type != PRECOND_NONE) { THCudaTensor_copy(state, p_gpu, zm1_gpu); // p_1 = z_0 } else { THCudaTensor_copy(state, p_gpu, r_gpu); // p_1 = r_0 } } else { if (precond_type != PRECOND_NONE) { // beta_k = r_{k_1}^T * z_{k - 1} / (r_{k-2}^T * z_{k - 2}) CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(zm1_gpu), 1, &numerator)); CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(rm2_gpu), 1, DEV_PTR(zm2_gpu), 1, &denominator)); beta = numerator / clampToEpsilon(denominator, epsilon); // p_k = z_{k - 1} + beta_k * p_{k - 1} CHECK_CUBLAS(hipblasSscal( cublas_handle, numel, &beta, DEV_PTR(p_gpu), 1)); CHECK_CUBLAS(hipblasSaxpy( cublas_handle, numel, &one, DEV_PTR(zm1_gpu), 1, DEV_PTR(p_gpu), 1)); } else { beta = r_norm_sq1 / clampToEpsilon(r_norm_sq0, epsilon); CHECK_CUBLAS(hipblasSscal(cublas_handle, numel, &beta, DEV_PTR(p_gpu), 1)); CHECK_CUBLAS(hipblasSaxpy(cublas_handle, numel, &one, DEV_PTR(r_gpu), 1, DEV_PTR(p_gpu), 1)); } } // alpha_k = r_{k-1}^T * z_{k - 1} / (p_k^T * A * p_k) // omega_k = A * p_k. // Recall: hipsparseScsrmv is a sparse matrix-vec + const operator. // TODO(tompson): should the sparse descr be descr_u? CHECK_CUSPARSE(hipsparseScsrmv( cusparse_handle, HIPSPARSE_OPERATION_NON_TRANSPOSE, numel, numel, nz, &one, descr, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), DEV_PTR(p_gpu), &zero, DEV_PTR(omega_gpu))); if (precond_type != PRECOND_NONE) { // numerator = r_{k-1}^T * z_{k - 1} CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(zm1_gpu), 1, &numerator)); // denominator = p_k^T * A * p_k = p_k^T * omega_k CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(p_gpu), 1, DEV_PTR(omega_gpu), 1, &denominator)); } else { numerator = r_norm_sq1; CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(p_gpu), 1, DEV_PTR(omega_gpu), 1, &denominator)); } alpha = numerator / clampToEpsilon(denominator, epsilon); // x_k = x_{k - 1} + alpha_k * p_k // Recall: hipblasSaxpy(handle, n, alpha, x, incx, y, incy) performs: // --> y [ j ] = alpha x [ k ] + y [ j ] CHECK_CUBLAS(hipblasSaxpy( cublas_handle, numel, &alpha, DEV_PTR(p_gpu), 1, DEV_PTR(x_gpu), 1)); if (precond_type != PRECOND_NONE) { THCudaTensor_copy(state, rm2_gpu, r_gpu); // rm2_gpu = r_gpu THCudaTensor_copy(state, zm2_gpu, zm1_gpu); // zm2_gpu = zm1_gpu } nalpha = -alpha; // According to Shewchuck we should re-init r every 50 iterations. // EDIT(tompson): It doesn't seem to help (removed but in git history). // r_k = r_{k - 1} - alpha_k * A * p_k = r_{k - 1} - alpha_k * omega_k CHECK_CUBLAS(hipblasSaxpy(cublas_handle, numel, &nalpha, DEV_PTR(omega_gpu), 1, DEV_PTR(r_gpu), 1)); r_norm_sq0 = r_norm_sq1; // Update previous residual. // Finally, calculate the new residual. 
CHECK_CUBLAS(hipblasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(r_gpu), 1, &r_norm_sq1)); if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << " iter " << iter << ": residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; } if (isnan(r_norm_sq1)) { luaL_error(L, "ERROR: r_norm_sq1 is nan!"); } } /* std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << " iter " << iter << ": residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; */ if (verbose) { if (iter == max_iter) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " hit max iteration count (" << max_iter << ")" << std::endl; } else if (r_norm_sq1 < tol * tol) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " residual " << std::sqrt(r_norm_sq1) << " fell below tol (" << tol << ")" << std::endl; } } max_residual = ::max(max_residual, std::sqrt(r_norm_sq1)); // For each separate linear system we're free to choose whatever constant // DC term we want. This has no impact on the velocity update, but will // be important if we include a pressure term when training the convnet // (which doesn't like arbitrary output scales / offsets). const float x_mean = THCudaTensor_meanall(state, x_gpu); // The result is in x_gpu. However, this is the pressure in the reduced // system (with non-fluid cells removed), we need to copy this back to the // original Cartesian (d x w x h) system. THCDeviceTensor<float, 1> dev_x = toDeviceTensor<float, 1>(state, x_gpu); LaunchKernel(L, &copyPressureFromSystem, 1, 1, zsize, ysize, xsize, dev_inds, dev_x, pressure, ibatch, x_mean); // Clean up cusparse. // TODO(tompson): Is there anything else to do? CHECK_CUSPARSE(hipsparseDestroyMatDescr(descr)); if (precond_type != PRECOND_NONE) { CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_a)); CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_u)); CHECK_CUSPARSE(hipsparseDestroyMatDescr(descr_l)); CHECK_CUSPARSE(hipsparseDestroyMatDescr(descr_u)); if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_ut)); } } } // for each connected component } // for each batch. lua_pushnumber(L, max_residual); return 1; } //****************************************************************************** // solveLinearSystemJacobi //****************************************************************************** __global__ void kernel_jacobiIteration( CudaFlagGrid flags, CudaRealGrid div, CudaRealGrid pressure, CudaRealGrid prev_pressure, const int bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { pressure(i, j, k, b) = 0; // Zero pressure on the border. return; } if (flags.isObstacle(i, j, k, b)) { pressure(i, j, k, b) = 0; return; } // Otherwise in a fluid or empty cell. // TODO(tompson): Is the logic here correct? Should empty cells be non-zero? const float divergence = div(i, j, k, b); // Get all the neighbors const float pC = prev_pressure(i, j, k, b); float p1 = prev_pressure(i - 1, j, k, b); float p2 = prev_pressure(i + 1, j, k, b); float p3 = prev_pressure(i, j - 1, k, b); float p4 = prev_pressure(i, j + 1, k, b); float p5 = flags.is_3d() ? 
prev_pressure(i, j, k - 1, b) : 0; float p6 = flags.is_3d() ? prev_pressure(i, j, k + 1, b) : 0; if (flags.isObstacle(i - 1, j, k, b)) { p1 = pC; } if (flags.isObstacle(i + 1, j, k, b)) { p2 = pC; } if (flags.isObstacle(i, j - 1, k, b)) { p3 = pC; } if (flags.isObstacle(i, j + 1, k, b)) { p4 = pC; } if (flags.is_3d() && flags.isObstacle(i, j, k - 1, b)) { p5 = pC; } if (flags.is_3d() && flags.isObstacle(i, j, k + 1, b)) { p6 = pC; } const float denom = flags.is_3d() ? 6.0f : 4.0f; const float v = (p1 + p2 + p3 + p4 + p5 + p6 + divergence) / denom; pressure(i, j, k, b) = v; } static int tfluids_CudaMain_solveLinearSystemJacobi(lua_State* L) { THCState* state = cutorch_getstate(L); THCudaTensor* tensor_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_div = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* tensor_p_prev = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); THCudaTensor* tensor_p_delta = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 5, "torch.CudaTensor")); THCudaTensor* tensor_p_delta_norm = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 6, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 7)); const float p_tol = static_cast<float>(lua_tonumber(L, 8)); const int64_t max_iter = static_cast<int64_t>(lua_tointeger(L, 9)); const bool verbose = static_cast<int64_t>(lua_toboolean(L, 10)); if (max_iter < 1) { luaL_error(L, "At least 1 iteration is needed (maxIter < 1)"); return 0; } CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaRealGrid pressure = toCudaRealGrid(state, tensor_p, is_3d); CudaRealGrid pressure_prev = toCudaRealGrid(state, tensor_p_prev, is_3d); CudaRealGrid div = toCudaRealGrid(state, tensor_div, is_3d); // Initialize the pressure to zero. THCudaTensor_zero(state, tensor_p); THCudaTensor_zero(state, tensor_p_prev); // Start with the output of the next iteration going to pressure. CudaRealGrid* cur_pressure = &pressure; CudaRealGrid* cur_pressure_prev = &pressure_prev; const int32_t nbatch = flags.nbatch(); const int64_t xsize = flags.xsize(); const int64_t ysize = flags.ysize(); const int64_t zsize = flags.zsize(); const int64_t numel = xsize * ysize * zsize; float residual; int64_t iter = 0; while (true) { const int32_t bnd = 1; // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &kernel_jacobiIteration, flags, flags, div, *cur_pressure, *cur_pressure_prev, bnd); // Current iteration output is now in cur_pressure (wherever that points). // Calculate the change in pressure up to a sign (i.e. the sign might be // incorrect, but we don't care). THCudaTensor_csub(state, tensor_p_delta, tensor_p, 1.0f, tensor_p_prev); THCudaTensor_resize2d(state, tensor_p_delta, nbatch, numel); // Calculate L2 norm over dim 2. THCudaTensor_norm(state, tensor_p_delta_norm, tensor_p_delta,2.0, 2, 1); // Put the view back. THCudaTensor_resize5d(state, tensor_p_delta, nbatch, 1, zsize, ysize, xsize); residual = THCudaTensor_maxall(state, tensor_p_delta_norm); if (verbose) { std::cout << "Jacobi iteration " << (iter + 1) << ": residual " << residual << std::endl; } // TODO(tompson) calculate divergence and implement divtol (it'll make it // slower). // TODO(tompson): We terminate on the worst case batch is this OK? 
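    // (For reference -- illustrative summary, not from the original comments:
    // kernel_jacobiIteration above computes
    //   p_new(i,j,k) = (p(i-1) + p(i+1) + p(j-1) + p(j+1) [+ p(k-1) + p(k+1)]
    //                   + div(i,j,k)) / (4, or 6 in 3D),
    // substituting the center value for obstacle neighbors, and `residual`
    // below is the largest per-batch L2 norm of the pressure change.)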
if (residual < p_tol) { if (verbose) { std::cout << "Jacobi max residual fell below p_tol (" << p_tol << ") (terminating)" << std::endl; } break; } iter++; if (iter >= max_iter) { if (verbose) { std::cout << "Jacobi max iteration count (" << max_iter << ") reached (terminating)" << std::endl; } break; } // We haven't yet terminated. CudaRealGrid* tmp = cur_pressure; cur_pressure = cur_pressure_prev; cur_pressure_prev = tmp; } // If we terminated with the cur_pressure pointing to the tmp array, then we // have to copy the pressure back into the output tensor. if (cur_pressure == &pressure_prev) { THCudaTensor_copy(state, tensor_p, tensor_p_prev); // p = p_prev } // Note, mean-subtraction is performed on the lua side. lua_pushnumber(L, residual); return 1; } //****************************************************************************** // INIT METHODS //****************************************************************************** static const struct luaL_Reg tfluids_CudaMain__ [] = { {"advectScalar", tfluids_CudaMain_advectScalar}, {"advectVel", tfluids_CudaMain_advectVel}, {"setWallBcsForward", tfluids_CudaMain_setWallBcsForward}, {"vorticityConfinement", tfluids_CudaMain_vorticityConfinement}, {"addBuoyancy", tfluids_CudaMain_addBuoyancy}, {"addGravity", tfluids_CudaMain_addGravity}, {"velocityUpdateForward", tfluids_CudaMain_velocityUpdateForward}, {"velocityUpdateBackward", tfluids_CudaMain_velocityUpdateBackward}, {"velocityDivergenceForward", tfluids_CudaMain_velocityDivergenceForward}, {"velocityDivergenceBackward", tfluids_CudaMain_velocityDivergenceBackward}, {"emptyDomain", tfluids_CudaMain_emptyDomain}, {"flagsToOccupancy", tfluids_CudaMain_flagsToOccupancy}, {"solveLinearSystemPCG", tfluids_CudaMain_solveLinearSystemPCG}, {"solveLinearSystemJacobi", tfluids_CudaMain_solveLinearSystemJacobi}, {"volumetricUpSamplingNearestForward", tfluids_CudaMain_volumetricUpSamplingNearestForward}, {"volumetricUpSamplingNearestBackward", tfluids_CudaMain_volumetricUpSamplingNearestBackward}, {"signedDistanceField", tfluids_CudaMain_signedDistanceField}, {NULL, NULL} // NOLINT }; const struct luaL_Reg* tfluids_CudaMain_getMethodsTable() { return tfluids_CudaMain__; } void tfluids_CudaMain_init(lua_State* L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, tfluids_CudaMain__, "tfluids"); }
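// -----------------------------------------------------------------------------
// Illustrative, self-contained sketch (NOT part of the original tfluids code):
// plain, unpreconditioned conjugate gradient on a CSR matrix, run on the CPU.
// It mirrors the recurrences used by solveLinearSystemPCG above, with the
// cuBLAS/cuSPARSE calls replaced by simple loops and without the
// clampToEpsilon guard on tiny denominators. The namespace and the names
// csrMatVec / dotProd / conjugateGradient are hypothetical and exist only for
// this example.
#include <cmath>
#include <vector>

namespace tfluids_example {

// y = A * x for a CSR matrix: row has n + 1 entries, col/val have nnz entries.
static void csrMatVec(const std::vector<int>& row, const std::vector<int>& col,
                      const std::vector<float>& val,
                      const std::vector<float>& x, std::vector<float>* y) {
  const int n = static_cast<int>(row.size()) - 1;
  for (int i = 0; i < n; i++) {
    float sum = 0.0f;
    for (int k = row[i]; k < row[i + 1]; k++) {
      sum += val[k] * x[col[k]];
    }
    (*y)[i] = sum;
  }
}

static float dotProd(const std::vector<float>& a, const std::vector<float>& b) {
  float s = 0.0f;
  for (int i = 0; i < static_cast<int>(a.size()); i++) {
    s += a[i] * b[i];
  }
  return s;
}

// Unpreconditioned CG for A * x = b (A symmetric positive definite), starting
// from x = 0. Returns the number of iterations performed.
static int conjugateGradient(const std::vector<int>& row,
                             const std::vector<int>& col,
                             const std::vector<float>& val,
                             const std::vector<float>& b,
                             std::vector<float>* x,
                             const float tol, const int max_iter) {
  const int n = static_cast<int>(b.size());
  x->assign(n, 0.0f);
  std::vector<float> r = b;        // r_0 = b - A * x_0 = b (since x_0 = 0).
  std::vector<float> p = r;        // p_1 = r_0.
  std::vector<float> Ap(n, 0.0f);
  float rs_old = dotProd(r, r);
  int iter = 0;
  while (std::sqrt(rs_old) > tol && iter < max_iter) {
    csrMatVec(row, col, val, p, &Ap);             // omega_k = A * p_k.
    const float alpha = rs_old / dotProd(p, Ap);  // alpha_k.
    for (int i = 0; i < n; i++) { (*x)[i] += alpha * p[i]; }
    for (int i = 0; i < n; i++) { r[i] -= alpha * Ap[i]; }
    const float rs_new = dotProd(r, r);
    const float beta = rs_new / rs_old;           // beta_{k+1}.
    for (int i = 0; i < n; i++) { p[i] = r[i] + beta * p[i]; }
    rs_old = rs_new;
    iter++;
  }
  return iter;
}

}  // namespace tfluids_example
// For a quick sanity check one could run conjugateGradient on the 3x3 CSR
// example given in the comments earlier; in exact arithmetic CG converges in
// at most n iterations for an n x n SPD system.
// -----------------------------------------------------------------------------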
69310691d14bc120180217b0833df52574fc245b.cu
// Copyright 2016 Google Inc, NYU. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <TH.h> #include <THC.h> #include <luaT.h> #include <assert.h> #include <cusparse.h> #include <cublas_v2.h> #include <float.h> #include <algorithm> #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" #include "THCDeviceUtils.cuh" #include "THCReduceApplyUtils.cuh" #include "generic/advect_type.h" #include "third_party/cell_type.h" #include "third_party/grid.cu.h" #include "generic/int3.cu.h" #include "generic/vec3.cu.h" // The PCG code also does some processing on the CPU, and so we need the // headers for grid, vec3, etc. #define torch_(NAME) TH_CONCAT_3(torch_, Real, NAME) #define torch_Tensor TH_CONCAT_STRING_3(torch., Real, Tensor) #define tfluids_(NAME) TH_CONCAT_3(tfluids_, Real, NAME) #define real float #define accreal double #define Real Float #define THInf FLT_MAX #define TH_REAL_IS_FLOAT #include "generic/vec3.h" #include "third_party/grid.h" #include "generic/find_connected_fluid_components.h" #undef accreal #undef real #undef Real #undef THInf #undef TH_REAL_IS_FLOAT #include "generic/calc_line_trace.cu" const int threads_per_block = 512; // Might need 256 for old SM. const int64_t cuda_num_threads = 1024; // Might need 256 for old SM. // This is REALLY ugly. But unfortunately cutorch_getstate() in // cutorch/torch/util.h is not exposed externally. We could call // cutorch.getState() from lua and pass in the struct into all the tfluids c // functions (as Soumith did with nn and cunn), but I think this is also just // as ugly. Instead lets just redefine cutorch_getstate and hope nothing // breaks :-( struct THCState* cutorch_getstate(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "_state"); struct THCState* state = reinterpret_cast<THCState*>(lua_touserdata(L, -1)); lua_pop(L, 2); return state; } // ***************************************************************************** // LaunchKernel // ***************************************************************************** // A simple helper function to reduce the amount of boiler plate code required // to launch a kernel (it also cuts down the number of potential bugs). // // All our kernels use an unknown number of parameters, so we'll need to // pass in a function pointer with the correct signature as well as the // arg lists. // // @template TFuncPtr: kernel func ptr. The compiler will autocomplete this! // @template Args: Again, you do not need to define it (see emptyDomain). // @param: func - the kernel function to call. // @param: <x>size - The size of the domain that the kernel will be launched // over. This MUST match the domain used in GetKernelIndices. // @param: args - the variable size argument list that the kernel takes as // input. template <typename TFuncPtr, typename... Args> // C++11 varadic function static void LaunchKernel(lua_State* L, TFuncPtr func, const int bsize, const int csize, const int zsize, const int ysize, const int xsize, Args... 
args) { THCState* state = cutorch_getstate(L); // Create the kernel grid and block sizes. // TODO(tompson): What if csize is 1 (i.e. scalar domains). Is this slower? int nplane = xsize * ysize * zsize; dim3 grid_size(THCCeilDiv(nplane, threads_per_block), csize, bsize); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); // Call the function. func<<<grid_size, block_size, 0, THCState_getCurrentStream(state)>>>(args...); } // Same as above, but on a one of our Grid objects. template <typename TFuncPtr, typename... Args> // C++11 varadic function static void LaunchKernel(lua_State* L, TFuncPtr func, const CudaGridBase& domain, Args... args) { THCState* state = cutorch_getstate(L); const int xsize = domain.xsize(); const int ysize = domain.ysize(); const int zsize = domain.zsize(); const int csize = domain.nchan(); const int bsize = domain.nbatch(); // Create the kernel grid and block sizes. // TODO(tompson): What if csize is 1 (i.e. scalar domains). Is this slower? int nplane = xsize * ysize * zsize; dim3 grid_size(THCCeilDiv(nplane, threads_per_block), csize, bsize); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); // Call the function. func<<<grid_size, block_size, 0, THCState_getCurrentStream(state)>>>(args...); THCudaCheck(cudaGetLastError()); } inline int64_t GetBlocks(const int64_t n) { return (n + cuda_num_threads - 1) / cuda_num_threads; } // This method will launch a kernel over the entire domain numel. template <typename TFuncPtr, typename... Args> // C++11 varadic function static void LaunchKernelLoop(lua_State* L, TFuncPtr func, const CudaGridBase& domain, Args... args) { THCState* state = cutorch_getstate(L); // Call the function. // const int64_t numel = THCudaTensor_nElement(state, domain); const int64_t numel = domain.numel(); func<<<GetBlocks(numel), cuda_num_threads, 0, THCState_getCurrentStream(state)>>>(args...); THCudaCheck(cudaGetLastError()); } // Assumes you're iterating over a scalar domain (i.e nchan = 1 for the domain // you're iterating over). The LaunchKernelLoop forces this since you cannot // specify a nchan. __device__ __forceinline__ void PntIdToScalarIndices( const int32_t nbatch, const int32_t zsize, const int32_t ysize, const int32_t xsize, const int32_t& pnt_id, int32_t& batch, int32_t& k, int32_t& j, int32_t& i) { i = pnt_id % xsize; j = (pnt_id / xsize) % ysize; k = (pnt_id / xsize / ysize) % zsize; batch = (pnt_id / xsize / ysize / zsize); } // CUDA: grid stride looping. // This strategy comes from similar code in the cunn library. #define CUDA_KERNEL_LOOP(numel, pnt_id) \ for (int32_t pnt_id = blockIdx.x * blockDim.x + threadIdx.x; \ pnt_id < (numel); \ pnt_id += blockDim.x * gridDim.x) // ***************************************************************************** // GetKernelIndices // ***************************************************************************** // Another helper function to get back the batch, chan, k, j, i indices in a // kernel launch by the LaunchKernel function above. // // If GetKernelIndices returns true, then the current kernel is out of the // domain (and so you should just exist the kernel). This happens because // the tensor may not fill up the last grid. // // Note, you should ALWAYS pass in the same sizes as the tensor you used // to call the kernel in LaunchKernel's domain parameter. 
__device__ __forceinline__ bool GetKernelIndices( const int32_t bsize, const int32_t csize, const int32_t zsize, const int32_t ysize, const int32_t xsize, int32_t& batch, int32_t& chan, int32_t& k, int32_t& j, int32_t& i) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; chan = blockIdx.y; batch = blockIdx.z; if (pnt_id >= zsize * ysize * xsize) { return true; } i = pnt_id % xsize; j = (pnt_id / xsize) % ysize; k = pnt_id / (xsize * ysize); return false; } // Same as above but on one of our Grid objects. __device__ __forceinline__ bool GetKernelIndices( const CudaGridBase& domain, int32_t& batch, int32_t& chan, int32_t& k, int32_t& j, int32_t& i) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; chan = blockIdx.y; batch = blockIdx.z; if (pnt_id >= (domain.zsize() * domain.ysize() * domain.xsize())) { return true; } i = pnt_id % domain.xsize(); j = (pnt_id / domain.xsize()) % domain.ysize(); k = pnt_id / (domain.ysize() * domain.xsize()); return false; } // There are a LOT of methods in tfluids that borrow heavily (or port) parts of // Manta. These are compiled here but note that they are added under a separate // license. You should see FluidNet/torch/tfluids/third_party/README for more // information. #include "third_party/tfluids.cu" // ***************************************************************************** // velocityDivergenceBackward // ***************************************************************************** __global__ void velocityDivergenceBackward( CudaFlagGrid flags, CudaMACGrid grad_u, CudaRealGrid grad_output, const int32_t bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { // Manta zeros stuff on the border in the forward pass, so they do // not contribute gradient. return; } if (!flags.isFluid(i, j, k, b)) { // Blocked cells don't contribute gradient. return; } // TODO(tompson): I'm sure these atomic add calls are slow! We should // probably change this from a scatter to a gather op to avoid having to use // them at all. // (NVIDIA state that atomic operations on global memory are extremely slow) // but on shared memory it is OK. So we could copy to shared first, use // atomic ops there then use a small number of atomic ops back to global mem // (probably rewriting it as a gather would be easier). const float go = grad_output(i, j, k, b); atomicAdd(&grad_u(i, j, k, 0, b), go); atomicAdd(&grad_u(i + 1, j, k, 0, b), -go); atomicAdd(&grad_u(i, j, k, 1, b), go); atomicAdd(&grad_u(i, j + 1, k, 1, b), -go); if (flags.is_3d()) { atomicAdd(&grad_u(i, j, k, 2, b), go); atomicAdd(&grad_u(i, j, k + 1, 2, b), -go); } } static int tfluids_CudaMain_velocityDivergenceBackward(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. 
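  // (Illustrative note, not from the original sources: the expected stack
  // layout, matching the reads below, is 1 = U (MAC velocity), 2 = flags,
  // 3 = gradOutput, 4 = is3D, 5 = gradU (output, accumulated into). The exact
  // wrapper name that performs the size checking lives on the Lua side.)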
THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 4)); THCudaTensor* tensor_grad_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 5, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaMACGrid grad_u = toCudaMACGrid(state, tensor_grad_u, is_3d); CudaRealGrid grad_output = toCudaRealGrid(state, tensor_grad_output, is_3d); // Firstly, we're going to accumulate gradient contributions, so set // grad_u to 0. THCudaTensor_zero(state, tensor_grad_u); // LaunchKernel args: lua_State, func, domain, args... const int32_t bnd = 1; LaunchKernel(L, &velocityDivergenceBackward, flags, flags, grad_u, grad_output, bnd); return 0; // Recall: number of return values on the lua stack. } // ***************************************************************************** // emptyDomain // ***************************************************************************** __global__ void emptyDomainLoop( CudaFlagGrid flags, const bool is_3d, const int32_t bnd, const int32_t nbatch, const int32_t zsize, const int32_t ysize, const int32_t xsize, const int32_t numel) { int32_t b, k, j, i; CUDA_KERNEL_LOOP(numel, pnt_id) { PntIdToScalarIndices(nbatch, zsize, ysize, xsize, pnt_id, b, k, j, i); if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (is_3d && (k < bnd || k > flags.zsize() - 1 - bnd))) { flags(i, j, k, b) = TypeObstacle; } else { flags(i, j, k, b) = TypeFluid; } } } __global__ void emptyDomain( CudaFlagGrid flags, const bool is_3d, const int32_t bnd) { int32_t b, dim, k, j, i; if (GetKernelIndices(flags, b, dim, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (is_3d && (k < bnd || k > flags.zsize() - 1 - bnd))) { flags(i, j, k, b) = TypeObstacle; } else { flags(i, j, k, b) = TypeFluid; } } static int tfluids_CudaMain_emptyDomain(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 2)); const int32_t bnd = static_cast<int32_t>(lua_tointeger(L, 3)); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); // LaunchKernel args: lua_State, func, domain, args... // Looped version - Actually not really any faster.. // LaunchKernelLoop(L, &emptyDomainLoop, flags, // flags, is_3d, bnd, flags.nbatch(), flags.zsize(), // flags.ysize(), flags.xsize(), flags.numel()); LaunchKernel(L, &emptyDomain, flags, flags, is_3d, bnd); return 0; } // ***************************************************************************** // flagsToOccupancy // ***************************************************************************** __global__ void flagsToOccupancy(CudaFlagGrid flags, CudaFlagGrid occupancy) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } float val; if (flags.isFluid(i, j, k, b)) { val = 0; } else if (flags.isObstacle(i, j, k, b)) { val = 1; } else { val = -1; // Can't throw error in kernel. Set to -1 and check min. 
} occupancy(i, j, k, b) = val; } static int tfluids_CudaMain_flagsToOccupancy(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_occupancy = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); // Normally, we would pass this in, but actually it doesn't make a difference // to the calculation. const bool is_3d = tensor_flags->size[2] > 1; CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaFlagGrid occupancy = toCudaFlagGrid(state, tensor_occupancy, is_3d); // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &flagsToOccupancy, flags, flags, occupancy); // We could be pedantic and check that the occupancy grid is OK. But this // reduction is very expensive on GPU. // if (THCudaTensor_minall(state, tensor_occupancy) < 0) { // luaL_error(L, "ERROR: unsupported flag cell found!"); // } return 0; } // ***************************************************************************** // velocityUpdateBackward // ***************************************************************************** __global__ void velocityUpdateBackward( CudaFlagGrid flags, CudaMACGrid grad_output, CudaRealGrid grad_p, const int32_t bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { // Manta zeros stuff on the border in the forward pass, so they do // not contribute gradient. return; } const CudaVec3 go(grad_output(i, j, k, b)); // TODO(tompson): I'm sure these atomic add calls are slow! We should // probably change this from a scatter to a gather op to avoid having to use // them at all. // (NVIDIA state that atomic operations on global memory are extremely slow) // but on shared memory it is OK. So we could copy to shared first, use // atomic ops there then use a small number of atomic ops back to global mem // (probably rewriting it as a gather would be easier). if (flags.isFluid(i, j, k, b)) { if (flags.isFluid(i - 1, j, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.x); atomicAdd(&grad_p(i - 1, j, k, b), go.x); } if (flags.isFluid(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.y); atomicAdd(&grad_p(i, j - 1, k, b), go.y); } if (flags.is_3d() && flags.isFluid(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k, b), -go.z); atomicAdd(&grad_p(i, j, k - 1, b), go.z); } if (flags.isEmpty(i - 1, j, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.x); } if (flags.isEmpty(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j, k, b), -go.y); } if (flags.is_3d() && flags.isEmpty(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k, b), -go.z); } } else if (flags.isEmpty(i, j, k, b) && !flags.isOutflow(i, j, k, b)) { // don't change velocities in outflow cells if (flags.isFluid(i - 1, j, k, b)) { atomicAdd(&grad_p(i - 1, j, k, b), go.x); } else { // Output doesn't depend on p, so gradient is zero and so doesn't // contribute. } if (flags.isFluid(i, j - 1, k, b)) { atomicAdd(&grad_p(i, j - 1, k, b), go.y); } else { // Output doesn't depend on p, so gradient is zero and so doesn't // contribute. } if (flags.is_3d()) { if (flags.isFluid(i, j, k - 1, b)) { atomicAdd(&grad_p(i, j, k - 1, b), go.z); } else { // Output doesn't depend on p, so gradient is zero and so // doesn't contribute. 
} } } } static int tfluids_CudaMain_velocityUpdateBackward(lua_State* L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_u = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* tensor_grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 5)); THCudaTensor* tensor_grad_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 6, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaMACGrid grad_output = toCudaMACGrid(state, tensor_grad_output, is_3d); CudaRealGrid grad_p = toCudaRealGrid(state, tensor_grad_p, is_3d); // Firstly, we're going to accumulate gradient contributions, so set // grad_p to 0. THCudaTensor_zero(state, tensor_grad_p); const int32_t bnd = 1; // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &velocityUpdateBackward, flags, flags, grad_output, grad_p, bnd); return 0; // Recall: number of return values on the lua stack. } // ***************************************************************************** // volumetricUpsamplingNearestForward // ***************************************************************************** __global__ void volumetricUpSamplingNearestForward( const int ratio, THCDeviceTensor<float, 5> in, THCDeviceTensor<float, 5> out) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; const int chan = blockIdx.y; const int batch = blockIdx.z; if (pnt_id >= (out.getSize(2) * out.getSize(3) * out.getSize(4))) { return; } const int x = pnt_id % out.getSize(4); const int y = (pnt_id / out.getSize(4)) % out.getSize(3); const int z = pnt_id / (out.getSize(3) * out.getSize(4)); const int xin = x / ratio; const int yin = y / ratio; const int zin = z / ratio; const float inVal = in[batch][chan][zin][yin][xin]; out[batch][chan][z][y][x] = inVal; } static int tfluids_CudaMain_volumetricUpSamplingNearestForward(lua_State* L) { THCState* state = cutorch_getstate(L); const int32_t ratio = static_cast<int32_t>(lua_tointeger(L, 1)); THCudaTensor* input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); if (input->nDimension != 5 || output->nDimension != 5) { luaL_error(L, "ERROR: input and output must be dim 5"); } const int32_t nbatch = input->size[0]; const int32_t nfeat = input->size[1]; const int32_t zdim = input->size[2]; const int32_t ydim = input->size[3]; const int32_t xdim = input->size[4]; if (output->size[0] != nbatch || output->size[1] != nfeat || output->size[2] != zdim * ratio || output->size[3] != ydim * ratio || output->size[4] != xdim * ratio) { luaL_error(L, "ERROR: input : output size mismatch."); } THCDeviceTensor<float, 5> dev_in = toDeviceTensor<float, 5>(state, input); THCDeviceTensor<float, 5> dev_out = toDeviceTensor<float, 5>(state, output); if (!THCudaTensor_isContiguous(state, input)) { luaL_error(L, "ERROR: input must be contiguous"); } if (!THCudaTensor_isContiguous(state, output)) { luaL_error(L, "ERROR: output must be contiguous"); } // One thread per output element. 
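  // (Illustrative example, not from the original comments: with ratio = 2 the
  // forward kernel above writes out[b][c][5][3][2] = in[b][c][2][1][1], i.e.
  // every output voxel reads in[z / ratio][y / ratio][x / ratio].)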
int nplane = dev_out.getSize(2) * dev_out.getSize(3) * dev_out.getSize(4); dim3 grid_size(THCCeilDiv(nplane, threads_per_block), dev_out.getSize(1), dev_out.getSize(0)); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); volumetricUpSamplingNearestForward<<<grid_size, block_size, 0, THCState_getCurrentStream(state)>>>( ratio, dev_in, dev_out); return 0; } // ***************************************************************************** // volumetricUpsamplingNearestBackward // ***************************************************************************** __global__ void volumetricUpSamplingNearestBackward( const int ratio, THCDeviceTensor<float, 5> grad_out, THCDeviceTensor<float, 5> grad_in) { const int pnt_id = threadIdx.x + blockIdx.x * blockDim.x; const int chan = blockIdx.y; const int batch = blockIdx.z; if (pnt_id >= (grad_in.getSize(2) * grad_in.getSize(3) * grad_in.getSize(4))) { return; } const int x = pnt_id % grad_in.getSize(4); const int y = (pnt_id / grad_in.getSize(4)) % grad_in.getSize(3); const int z = pnt_id / (grad_in.getSize(3) * grad_in.getSize(4)); float sum = 0.0f; // Now accumulate gradients from the upsampling window. for (int32_t zup = 0; zup < ratio; zup++) { for (int32_t yup = 0; yup < ratio; yup++) { for (int32_t xup = 0; xup < ratio; xup++) { const int xin = x * ratio + xup; const int yin = y * ratio + yup; const int zin = z * ratio + zup; const float val = grad_out[batch][chan][zin][yin][xin]; sum += val; } } } grad_in[batch][chan][z][y][x] = sum; } static int tfluids_CudaMain_volumetricUpSamplingNearestBackward(lua_State* L) { THCState* state = cutorch_getstate(L); const int32_t ratio = static_cast<int32_t>(lua_tointeger(L, 1)); THCudaTensor* input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* grad_output = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* grad_input = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); if (input->nDimension != 5 || grad_output->nDimension != 5 || grad_input->nDimension != 5) { luaL_error(L, "ERROR: input, gradOutput and gradInput must be dim 5"); } const int32_t nbatch = input->size[0]; const int32_t nfeat = input->size[1]; const int32_t zdim = input->size[2]; const int32_t ydim = input->size[3]; const int32_t xdim = input->size[4]; if (grad_output->size[0] != nbatch || grad_output->size[1] != nfeat || grad_output->size[2] != zdim * ratio || grad_output->size[3] != ydim * ratio || grad_output->size[4] != xdim * ratio) { luaL_error(L, "ERROR: input : gradOutput size mismatch."); } if (grad_input->size[0] != nbatch || grad_input->size[1] != nfeat || grad_input->size[2] != zdim || grad_input->size[3] != ydim || grad_input->size[4] != xdim) { luaL_error(L, "ERROR: input : gradInput size mismatch."); } THCDeviceTensor<float, 5> dev_in = toDeviceTensor<float, 5>(state, input); THCDeviceTensor<float, 5> dev_grad_out = toDeviceTensor<float, 5>( state, grad_output); THCDeviceTensor<float, 5> dev_grad_in = toDeviceTensor<float, 5>( state, grad_input); if (!THCudaTensor_isContiguous(state, input)) { luaL_error(L, "ERROR: input must be contiguous"); } if (!THCudaTensor_isContiguous(state, grad_output)) { luaL_error(L, "ERROR: gradOutput must be contiguous"); } if (!THCudaTensor_isContiguous(state, grad_input)) { luaL_error(L, "ERROR: gradInput must be contiguous"); } // One thread per grad_input element. // TODO(tompson): This is slow. Switch to a looping kernel. 
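  // (Illustrative example, not from the original comments: with ratio = 2 the
  // backward kernel above sets grad_in[b][c][1][1][1] to the sum of the 8
  // grad_out values with z, y, x each in {2, 3} -- i.e. every input voxel
  // gathers its ratio^3 upsampling window.)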
int nplane = dev_grad_in.getSize(2) * dev_grad_in.getSize(3) * dev_grad_in.getSize(4); dim3 grid_size(THCCeilDiv(nplane, threads_per_block), dev_grad_in.getSize(1), dev_grad_in.getSize(0)); dim3 block_size(nplane > threads_per_block ? threads_per_block : nplane); volumetricUpSamplingNearestBackward<<<grid_size, block_size, 0, THCState_getCurrentStream(state)>>>( ratio, dev_grad_out, dev_grad_in); return 0; } // ***************************************************************************** // signedDistanceField // ***************************************************************************** __global__ void signedDistanceField( CudaFlagGrid flags, const int search_rad, CudaRealGrid dst) { int b, chan, z, y, x; if (GetKernelIndices(flags, b, chan, z, y, x)) { return; } if (flags.isObstacle(x, y, z, b)) { dst(x, y, z, b) = 0; } float dist_sq = static_cast<float>(search_rad * search_rad); const int zmin = max(0, z - search_rad);; const int zmax = min((int)flags.zsize() - 1, z + search_rad); const int ymin = max(0, y - search_rad);; const int ymax = min((int)flags.ysize() - 1, y + search_rad); const int xmin = max(0, x - search_rad);; const int xmax = min((int)flags.xsize() - 1, x + search_rad); for (int zsearch = zmin; zsearch <= zmax; zsearch++) { for (int ysearch = ymin; ysearch <= ymax; ysearch++) { for (int xsearch = xmin; xsearch <= xmax; xsearch++) { if (flags.isObstacle(xsearch, ysearch, zsearch, b)) { const float cur_dist_sq = ((z - zsearch) * (z - zsearch) + (y - ysearch) * (y - ysearch) + (x - xsearch) * (x - xsearch)); if (dist_sq > cur_dist_sq) { dist_sq = cur_dist_sq; } } } } } dst(x, y, z, b) = sqrt(dist_sq); } static int tfluids_CudaMain_signedDistanceField(lua_State *L) { THCState* state = cutorch_getstate(L); // Get the args from the lua stack. NOTE: We do ALL arguments (size checking) // on the lua stack. THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); const int32_t search_rad = static_cast<int32_t>(lua_tointeger(L, 2)); const bool is_3d = static_cast<bool>(lua_toboolean(L, 3)); THCudaTensor* tensor_dst = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaRealGrid dst = toCudaRealGrid(state, tensor_dst, is_3d); // LaunchKernel args: lua_State, func, domain, args... 
LaunchKernel(L, &signedDistanceField, flags, flags, search_rad, dst); return 0; } //****************************************************************************** // solveLinearSystemPCG //****************************************************************************** static cublasHandle_t cublas_handle = 0; static void init_cublas() { if (cublas_handle == 0) { cublasStatus_t status = cublasCreate(&cublas_handle); if (status != CUBLAS_STATUS_SUCCESS) { THError("CUBLAS Library initialization failed"); } } } static cusparseHandle_t cusparse_handle = 0; static void init_cusparse() { if (cusparse_handle == 0) { cusparseStatus_t status = cusparseCreate(&cusparse_handle); if (status != CUSPARSE_STATUS_SUCCESS) { THError("CUSPARSE Library initialization failed"); } } } // Method from: // stackoverflow.com/questions/30454089/solving-sparse-definite-positive-linear-systems-in-cuda // NOLINT static const char* cusparseGetStatusString(cusparseStatus_t status) { switch (status) { case CUSPARSE_STATUS_SUCCESS: return "CUSPARSE_STATUS_SUCCESS"; case CUSPARSE_STATUS_NOT_INITIALIZED: return "CUSPARSE_STATUS_NOT_INITIALIZED"; case CUSPARSE_STATUS_ALLOC_FAILED: return "CUSPARSE_STATUS_ALLOC_FAILED"; case CUSPARSE_STATUS_INVALID_VALUE: return "CUSPARSE_STATUS_INVALID_VALUE"; case CUSPARSE_STATUS_ARCH_MISMATCH: return "CUSPARSE_STATUS_ARCH_MISMATCH"; case CUSPARSE_STATUS_MAPPING_ERROR: return "CUSPARSE_STATUS_MAPPING_ERROR"; case CUSPARSE_STATUS_EXECUTION_FAILED: return "CUSPARSE_STATUS_EXECUTION_FAILED"; case CUSPARSE_STATUS_INTERNAL_ERROR: return "CUSPARSE_STATUS_INTERNAL_ERROR"; case CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED: return "CUSPARSE_STATUS_MATRIX_TYPE_NOT_SUPPORTED"; case CUSPARSE_STATUS_ZERO_PIVOT: return "CUSPARSE_STATUS_ZERO_PIVOT"; default: return "<unknown cusparse error>"; } } #define CHECK_CUSPARSE(expr) checkCusparseStatus((expr), __FILE__, __LINE__) void checkCusparseStatus(cusparseStatus_t stat, char const * file, int line) { if (stat != CUSPARSE_STATUS_SUCCESS) { std::cout << "CUSPARSE error in file '" << file << "', line " << line << ": error(" << stat << "): " << cusparseGetStatusString(stat) << std::endl; } THCudaCheck(cudaGetLastError()); // Sometimes, torch's cuda handle wont catch the error but cusparse enum // is bad. If that's the case, hard fail here. if (stat != CUSPARSE_STATUS_SUCCESS) { THError("CUSPARSE error"); exit(-1); } } static const char* cublasGetStatusString(cublasStatus_t status) { switch (status) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; default: return "<unknown cublas error>"; } } #define CHECK_CUBLAS(expr) checkCublasStatus((expr), __FILE__, __LINE__) void checkCublasStatus(cublasStatus_t stat, char const * file, int line) { if (stat != CUBLAS_STATUS_SUCCESS) { std::cout << "CUBLAS error in file '" << file << "', line " << line << ": error(" << stat << "): " << cublasGetStatusString(stat) << std::endl; } THCudaCheck(cudaGetLastError()); // Sometimes, torch's cuda handle wont catch the error but cusparse enum // is bad. 
If that's the case, hard fail here. if (stat != CUBLAS_STATUS_SUCCESS) { THError("CUBLAS error"); exit(-1); } } // These macros require that state be defined, which you can get by calling // cutorch_getstate. #define DEV_PTR(tensor) THCudaTensor_data(state, tensor) #define DEV_INT_PTR(tensor) THCudaIntTensor_data(state, tensor) int64_t createReducedSystemIndices( const tfluids_FloatFlagGrid& flags, THIntTensor* components, THIntTensor* indices, const int32_t ibatch, const int32_t icomponent) { if (indices->nDimension != 3) { THError("indices must be 3D"); } if (components->nDimension != 3) { THError("components must be 3D"); } int64_t cur_index = 0; const int32_t zsize = flags.zsize(); const int32_t ysize = flags.ysize(); const int32_t xsize = flags.xsize(); if ((indices->size[0] != zsize || indices->size[1] != ysize || indices->size[2] != xsize)) { THError("indices must be the same dimension as flags (non-batched)"); } for (int32_t k = 0; k < zsize; k++) { for (int32_t j = 0; j < ysize; j++) { for (int32_t i = 0; i < xsize; i++) { if (THIntTensor_get3d(components, k, j, i) == icomponent) { // Note, if it's part of the connected component of fluid cells then // we don't have to check if it's fluid. However we should do anyway // just to make sure. if (!flags.isFluid(i, j, k, ibatch)) { THError("A non fluid component was found!"); } THIntTensor_set3d(indices, k, j, i, cur_index); cur_index++; } else { THIntTensor_set3d(indices, k, j, i, -1); } } } } return cur_index; } // @param I, col, and val: the output CSR formatted sparse matrix. // TOTO(tompson): This is super slow. All the _get and _set methods do bounds // checking. Once everything is working, switch to raw ptrs. static int64_t setupLaplacian( const tfluids_FloatFlagGrid& flags, const int32_t b, THIntTensor* row, THIntTensor* col, THFloatTensor *val, const bool upper_triangular, THIntTensor* system_indices, THIntTensor* components, const int icomponent) { // row stores the indices of the first non-zero item in the col and val // arrays. (i.e. col[row[n]] is the column index of the 1st element of row n. // and val[row[4]] is the corresponding value. // The number of non-zero values in each row is given by row[n + 1] - row[n]. // Hence the need to have (dim + 1) row values. int64_t current_row = 0; int64_t val_index = 0; THIntTensor_set1d(row, current_row, 0); // 0th row starts at 0th index. // TODO(tompson): Parallelize this. const int32_t zsize = flags.zsize(); const int32_t ysize = flags.ysize(); const int32_t xsize = flags.xsize(); const int32_t bnd = 1; for (int32_t k = 0; k < zsize; k++) { for (int32_t j = 0; j < ysize; j++) { for (int32_t i = 0; i < xsize; i++) { if (THIntTensor_get3d(components, k, j, i) != icomponent) { // Not part of the current connected component. // Make sure the current cell wasn't assigned an index in the output // system. if (THIntTensor_get3d(system_indices, k, j, i) != -1) { THError("Non fluid cell shouldn't have an index!"); } continue; } const bool out_of_bounds = (i < bnd || i > xsize - 1 - bnd || j < bnd || j > ysize - 1 - bnd || (flags.is_3d() && (k < bnd || k > zsize - 1 - bnd))); // As per Manta's convention, the border are all obstacle cells. // Therefore their divergence (rhs) is zero. AND the do not contribute // non-zero elements to the sparse matrix. As such, we just skip // over them. // Technically the isFluid check here is completely redundant (since // it's part of a component), but lets do it anyway for clarity). 
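        // (Illustrative example, not from the original comments: for an
        // interior cell whose six neighbors are all fluid, the row emitted
        // below has diagonal 6 (3D) or 4 (2D) and -1 for each fluid neighbor;
        // with upper_triangular only the diagonal and the +x/+y/+z entries are
        // written, i.e. the upper half of the symmetric matrix.)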
if (!out_of_bounds && flags.isFluid(i, j, k, b)) { // Collect the diagonal term first. The diag term is the sum of // NON obstacle cells. In most cases this is the same as fluid cells, // but empty cells also contribute flow. float val_diagonal = 0.0f; if (!flags.isObstacle(i - 1, j, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i + 1, j, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i, j - 1, k, b)) { val_diagonal += 1; } if (!flags.isObstacle(i, j + 1, k, b)) { val_diagonal += 1; } if (flags.is_3d() && !flags.isObstacle(i, j, k - 1, b)) { val_diagonal += 1; } if (flags.is_3d() && !flags.isObstacle(i, j, k + 1, b)) { val_diagonal += 1; } // Off diagonal entries. float im1jk = 0.0f; if (!upper_triangular && flags.isFluid(i - 1, j, k, b)) { im1jk = -1.0f; // Off diagonal entry for fluid neighbors is -1. } float ip1jk = 0.0f; if (flags.isFluid(i + 1, j, k, b)) { ip1jk = -1.0f; } float ijm1k = 0.0f; if (!upper_triangular && flags.isFluid(i, j - 1, k, b)) { ijm1k = -1.0f; } float ijp1k = 0.0f; if (flags.isFluid(i, j + 1, k, b)) { ijp1k = -1.0f; } float ijkm1 = 0.0f; float ijkp1 = 0.0f; if (flags.is_3d()) { if (!upper_triangular && flags.isFluid(i, j, k - 1, b)) { ijkm1 = -1.0f; } if (flags.isFluid(i, j, k + 1, b)) { ijkp1 = -1.0f; } } // Set the matrix values now. Setting values in increasing index // order as it is done this way by the denseToCSR. // Also every example I have seen does it this way. if (ijkm1 != 0.0f) { // We can't just use the flat index (x + (y * w) + (z * w * h)) // as the column index because we're operating on a reduced system. // Therefore we need to look up the system_index. const int isys = THIntTensor_get3d(system_indices, k - 1, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijkm1); THIntTensor_set1d(col, val_index, isys); val_index++; // increment the val and col place } if (ijm1k != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j - 1, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijm1k); THIntTensor_set1d(col, val_index, isys); val_index++; } if (im1jk != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j, i - 1); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, im1jk); THIntTensor_set1d(col, val_index, isys); val_index++; } { // For scoping of isys. const int isys = THIntTensor_get3d(system_indices, k, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, val_diagonal); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ip1jk != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j, i + 1); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ip1jk); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ijp1k != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k, j + 1, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijp1k); THIntTensor_set1d(col, val_index, isys); val_index++; } if (ijkp1 != 0.0f) { const int isys = THIntTensor_get3d(system_indices, k + 1, j, i); if (isys < 0) { THError("system index is not defined!"); } THFloatTensor_set1d(val, val_index, ijkp1); THIntTensor_set1d(col, val_index, isys); val_index++; } current_row++; THIntTensor_set1d(row, current_row, val_index); } else { // isFluid & inBounds // We shouldn't have got here. All cells in a component should be // fluid cells. 
std::cout << "Non fluid cell found in a connected component or " << "fluid cell found on the domain border:" << " flags(i, j, k, b) = " << flags(i, j, k, b) << std::endl; // TODO(tompson): Manta always has 1 solid component on the border, // but should we allow it? THError("Non fluid cell found in a connected component"); } } } } return val_index; // Return number of non-zero entries in the matrix A. } // allocTempTensor expects a lua table on the stack in index 1, that we will // store a bunch of temporary tensors. We will allocate these on demand, i.e. // we will return the existing tensors if they exist, else we will create a new // one. template <typename TensorType> TensorType* allocTempTensor(lua_State* L, const char* name, const char* typeStr, TensorType* (*newFunc)()) { TensorType* tensor = nullptr; luaL_checktype(L, 1, LUA_TTABLE); lua_getfield(L, 1, name); // Stack now has: // 2, 3, 4, ...: Rest of args to c function. // 1: tfluids._tmpPCG // -1: tfluids._tmpPCG[name] if (lua_isnil(L, -1)) { lua_pop(L, 1); // Pop the nil. // Create a new tensor. tensor = newFunc(); // Push the new tensor into the table. lua_pushstring(L, name); luaT_pushudata(L, (void *)tensor, typeStr); lua_settable(L, 1); // Note: pops both key and value. } else { // Get the value. tensor = reinterpret_cast<TensorType*>(luaT_checkudata(L, -1, typeStr)); // Pop the tensor from the stack. lua_pop(L, 1); } return tensor; } // allocTempCudaTensor is the same as above, except annoyingly the 'new' func // signature is differnt and there's no easy way to template the function ptr // without wrapping it with a static number of arguments. It's easier just // to define two methods, even though it's replicated code. template <typename TensorType> TensorType* allocTempCudaTensor(lua_State* L, const char* name, const char* typeStr, TensorType* (*newFunc)(THCState*)) { TensorType* tensor = nullptr; luaL_checktype(L, 1, LUA_TTABLE); lua_getfield(L, 1, name); if (lua_isnil(L, -1)) { lua_pop(L, 1); tensor = newFunc(cutorch_getstate(L)); lua_pushstring(L, name); luaT_pushudata(L, (void *)tensor, typeStr); lua_settable(L, 1); } else { tensor = reinterpret_cast<TensorType*>(luaT_checkudata(L, -1, typeStr)); lua_pop(L, 1); } return tensor; } // calpToEpsilon clamps to positive or negative epsilon depending on sign.. inline float clampToEpsilon(const float val, const float epsilon) { if (std::abs(val) < epsilon) { if (val < 0) { return std::min(val, -epsilon); } else { return std::max(val, epsilon); } } else { return val; } } __global__ void copyPressureFromSystem( THCDeviceTensor<int, 3> system_indices, THCDeviceTensor<float, 1> pressure_pcg, CudaRealGrid pressure, const int32_t bout, const float mean) { const int32_t xsize = system_indices.getSize(2); const int32_t ysize = system_indices.getSize(1); const int32_t zsize = system_indices.getSize(0); int32_t b, chan, k, j, i; // b and chan will always be zero (because we call this on the non-batched // tensor). if (GetKernelIndices(1, 1, zsize, ysize, xsize, b, chan, k, j, i)) { return; } // Look up the system index for the current voxel / pixel. int ind = system_indices[k][j][i]; if (ind < 0) { // This pixel wasn't in the linear system (it's a non-fluid cell). // The output pressure will be set to zero (but not here since we don't // want to overwrite a cell not on our connected component. 
} else { pressure(i, j, k, bout) = pressure_pcg[ind] - mean; } } __global__ void copyDivergenceToSystem( THCDeviceTensor<int, 3> system_indices, THCDeviceTensor<float, 1> div_pcg, CudaRealGrid div, const int32_t ibatch) { const int32_t xsize = system_indices.getSize(2); const int32_t ysize = system_indices.getSize(1); const int32_t zsize = system_indices.getSize(0); int32_t b, chan, k, j, i; // b and chan will always be zero (because we call this on the non-batched // tensor). if (GetKernelIndices(1, 1, zsize, ysize, xsize, b, chan, k, j, i)) { return; } // Look up the system index for the current voxel / pixel. const int ind = system_indices[k][j][i]; if (ind >= 0) { // Fluid cell (so it's in the system), copy the divergence. div_pcg[ind] = div(i, j, k, ibatch); } } typedef enum { PRECOND_NONE, PRECOND_ILU0, PRECOND_IC0, } PrecondType; PrecondType StringToPrecondType(lua_State* L, const std::string& precond_type_str) { if (precond_type_str == "none") { return PRECOND_NONE; } else if (precond_type_str == "ilu0") { return PRECOND_ILU0; } else if (precond_type_str == "ic0") { return PRECOND_IC0; } else { luaL_error(L, "precondType is not supported."); return PRECOND_NONE; } } std::string PrecondTypeToString(const PrecondType precond_type) { switch (precond_type) { case PRECOND_NONE: return "none"; case PRECOND_ILU0: return "ilu0"; case PRECOND_IC0: return "ic0"; default: THError("Incorrect precond enum type."); exit(-1); } } static int tfluids_CudaMain_solveLinearSystemPCG(lua_State* L) { init_cublas(); // No op if already initialized. init_cusparse(); // No op if already initialized. THCState* state = cutorch_getstate(L); luaL_checktype(L, 1, LUA_TTABLE); // The first argument should be a table. THCudaTensor* p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* flags_gpu = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* div = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 5)); const std::string precond_type_str = static_cast<std::string>(lua_tostring(L, 6)); const float tol = static_cast<float>(lua_tonumber(L, 7)); const int64_t max_iter = static_cast<int64_t>(lua_tointeger(L, 8)); const bool verbose = static_cast<bool>(lua_toboolean(L, 9)); // This PCG routine uses a LOT of temporary storage. We will create tensors // in the tfluids._tmpPCG namespace (table) that are static to the library. // This means the tensors stick around between subsequent calls and are // resized on demand. // The remaining are temporary storage allocated on the lua side but static // to the library. They stick around but are resized on demand. 
THIntTensor* system_indices_cpu = allocTempTensor<THIntTensor>( L, "systemIndicesCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* row_cpu = allocTempTensor<THIntTensor>( L, "rowCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* components = allocTempTensor<THIntTensor>( L, "componentsCPU", "torch.IntTensor", &THIntTensor_new); THIntTensor* col_cpu = allocTempTensor<THIntTensor>( L, "colCPU", "torch.IntTensor", &THIntTensor_new); THFloatTensor* val_cpu = allocTempTensor<THFloatTensor>( L, "valCPU", "torch.FloatTensor", &THFloatTensor_new); THFloatTensor* flags_cpu = allocTempTensor<THFloatTensor>( L, "flagsCPU", "torch.FloatTensor", &THFloatTensor_new); THCudaIntTensor* row_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "rowGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); THCudaIntTensor* col_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "colGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); THCudaTensor* val_gpu = allocTempCudaTensor<THCudaTensor>( L, "valGPU", "torch.CudaTensor", &THCudaTensor_new); // TODO(tompson): I'm not convinced we need half of these. THCudaTensor* rhs_gpu = allocTempCudaTensor<THCudaTensor>( L, "rhsGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* r_gpu = allocTempCudaTensor<THCudaTensor>( L, "rGPU", "torch.CudaTensor", &THCudaTensor_new); // residual vector THCudaTensor* val_precond_gpu = allocTempCudaTensor<THCudaTensor>( L, "valILU0GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* x_gpu = allocTempCudaTensor<THCudaTensor>( L, "xGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* d_gpu = allocTempCudaTensor<THCudaTensor>( L, "dGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* y_gpu = allocTempCudaTensor<THCudaTensor>( L, "yGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* zm1_gpu = allocTempCudaTensor<THCudaTensor>( L, "zm1GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* zm2_gpu = allocTempCudaTensor<THCudaTensor>( L, "zm2GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* p_gpu = allocTempCudaTensor<THCudaTensor>( // Search direction. L, "pGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* omega_gpu = allocTempCudaTensor<THCudaTensor>( L, "omegaGPU", "torch.CudaTensor", &THCudaTensor_new); THCudaTensor* rm2_gpu = allocTempCudaTensor<THCudaTensor>( L, "rm2GPU", "torch.CudaTensor", &THCudaTensor_new); THCudaIntTensor* system_indices_gpu = allocTempCudaTensor<THCudaIntTensor>( L, "systemIndicesGPU", "torch.CudaIntTensor", &THCudaIntTensor_new); // We need the FLAG grid on the CPU, because that's where we're going to // construct the sparse matrix (Laplacian). THFloatTensor_resize5d(flags_cpu, flags_gpu->size[0], flags_gpu->size[1], flags_gpu->size[2], flags_gpu->size[3], flags_gpu->size[4]); THFloatTensor_copyCuda(state, flags_cpu, flags_gpu); // flags_cpu = flags_gpu tfluids_FloatFlagGrid flags(flags_cpu, is_3d); CudaRealGrid pressure = toCudaRealGrid(state, p, is_3d); CudaRealGrid divergence = toCudaRealGrid(state, div, is_3d); // Zero the pressure everywhere, this will zero out the pressure of the // non-fluid and empty region cells (because we don't touch them during the // pressure solve). THCudaTensor_zero(state, p); const int32_t xsize = flags.xsize(); const int32_t ysize = flags.ysize(); const int32_t zsize = flags.zsize(); const int32_t nbatch = flags.nbatch(); // We wont parallelize over batches, but process each sequentially. // TODO(tompson): We could at least parallelize all the Laplacian setups // over batch. 
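  // For reference, the structure of the solve below is:
  //   for each batch ibatch:
  //     components = findConnectedFluidComponents(flags, ibatch)
  //     for each component (skipping single-cell components):
  //       1. createReducedSystemIndices: map the component's fluid cells to
  //          consecutive system indices (non-fluid cells get -1).
  //       2. setupLaplacian: build the reduced Laplacian A in CSR form.
  //       3. copyDivergenceToSystem: gather the rhs from the divergence grid.
  //       4. Solve A x = rhs with (preconditioned) conjugate gradient.
  //       5. copyPressureFromSystem: scatter x - mean(x) back into the
  //          pressure grid.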
float max_residual = -std::numeric_limits<float>::infinity(); for (int32_t ibatch = 0; ibatch < nbatch; ibatch++) { // Find connected components of fluid regions. If we combine these into a // single system then it will be singular (with non-positive pivot) and ICU0 // preconditioner will fail. Bridson talks about enforcing compatibility // conditioner by adding the null-space components to the RHS, this is one // solution. Another solution is to solve M PCG problems for each of the M // components (this is what we'll do). THIntTensor_resize3d(components, zsize, ysize, xsize); std::vector<int32_t> component_sizes; std::vector<Int3> single_components; const int32_t ncomponents = findConnectedFluidComponents( flags, components, ibatch, &component_sizes); // Now solve ncomponents linear systems. for (int32_t icomponent = 0; icomponent < ncomponents; icomponent++) { PrecondType precond_type = StringToPrecondType(L, precond_type_str); if (component_sizes[icomponent] == 1) { // Single components will not have a valid solution. Leave the pressure // at zero. if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " has size 1, skipping." << std::endl; } continue; } else { if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " has size " << component_sizes[icomponent] << "." << std::endl; } if (component_sizes[icomponent] < 5) { // Don't use a preconditioner, it's slower. precond_type = PRECOND_NONE; } } if (verbose) { std::cout << "PCG: " << (ibatch + 1) << " component " << (icomponent + 1) << " using precond type " << PrecondTypeToString(precond_type) << std::endl; } // We're going to create the sparse laplacian next, but we don't want all // zero rows (caused by obstacle cells). It guarantees that A is singular. // it causes issues with the preconditioner in cusparse, and it is // inefficient. Therefore we need to scan through the dataset and create // a map of fluid cells, with indices into our new system. THIntTensor_resize3d(system_indices_cpu, zsize, ysize, xsize); THCudaIntTensor_resize3d(state, system_indices_gpu, zsize, ysize, xsize); const int64_t numel = createReducedSystemIndices( flags, components, system_indices_cpu, ibatch, icomponent); // While we're at it, copy these system indices to the GPU (we'll need // them later). THCudaIntTensor_copyInt(state, system_indices_gpu, system_indices_cpu); // Recall: resize ops are a no-op if the storage shrinks or stays the // same. // Note: here we'll allocate the col and val arrays to the maximum // possible size (6 neighbors + 1 diagonal = 7). This would be for a // domain of all fluid cells, where we also include the border (which like // Manta we do not) and if the border cells had neighbors outside (which // they do not). So actually this is a conservative sizing. // If this is a problem we can always do two passes, one to get the number // of non-zero values, and one to fill them (as @kristofe used to do). THIntTensor_resize1d(row_cpu, numel + 1); THIntTensor_resize1d(col_cpu, numel * 7); THFloatTensor_resize1d(val_cpu, numel * 7); const bool upper_tri = precond_type == PRECOND_IC0; const int64_t nz = setupLaplacian(flags, ibatch, row_cpu, col_cpu, val_cpu, upper_tri, system_indices_cpu, components, icomponent); if (nz > col_cpu->size[0]) { luaL_error(L, "INTERNAL ERROR: num of non-zero elements is too large!."); } // Copy the sparse matrix values to the GPU, this time we'll only allocate // the number of non-zero values needed. 
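      // Illustrative sizing (hypothetical grid): an all-fluid 126^3 interior
      // (a 128^3 grid minus the 1-cell obstacle border) gives
      // numel ~= 2.0e6 rows, so col/val are conservatively allocated with
      // 7 * numel ~= 1.4e7 entries; only the first nz of them are copied to
      // the GPU below. With upper_tri (ic0) only the diagonal and the
      // +x/+y/+z neighbours are emitted, so nz is correspondingly smaller.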
THCudaIntTensor_resize1d(state, row_gpu, numel + 1); THCudaIntTensor_copyInt(state, row_gpu, row_cpu); THCudaIntTensor_resize1d(state, col_gpu, nz); { // Wrap for scoping of col_cpu_nz. // Recall: newNarrow(tensor, dim, first_index, size). THIntTensor* col_cpu_nz = THIntTensor_newNarrow(col_cpu, 0, 0, nz); THCudaIntTensor_copyInt(state, col_gpu, col_cpu_nz); THIntTensor_free(col_cpu_nz); } THCudaTensor_resize1d(state, val_gpu, nz); { // Wrap for scoping of val_cpu_nz. THFloatTensor* val_cpu_nz = THFloatTensor_newNarrow(val_cpu, 0, 0, nz); THCudaTensor_copyFloat(state, val_gpu, val_cpu_nz); THFloatTensor_free(val_cpu_nz); } // Create a description in cusparse of the A matrix that we've // created (the val, row and col values above). cusparseMatDescr_t descr = 0; CHECK_CUSPARSE(cusparseCreateMatDescr(&descr)); if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(cusparseSetMatFillMode(descr, CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_SYMMETRIC)); } else { CHECK_CUSPARSE(cusparseSetMatType(descr, CUSPARSE_MATRIX_TYPE_GENERAL)); } CHECK_CUSPARSE(cusparseSetMatIndexBase(descr, CUSPARSE_INDEX_BASE_ZERO)); // Also copy the rhs (aka div) to the 'b' tensor with reduced indices // and at the current batch index. THCudaTensor_resize1d(state, rhs_gpu, numel); THCDeviceTensor<int, 3> dev_inds = toDeviceTensor<int, 3>(state, system_indices_gpu); THCDeviceTensor<float, 1> dev_rhs = toDeviceTensor<float, 1>(state, rhs_gpu); LaunchKernel(L, &copyDivergenceToSystem, 1, 1, zsize, ysize, xsize, dev_inds, dev_rhs, divergence, ibatch); // Generate the Preconditioner. // Create the analysis info object for the A matrix. cusparseSolveAnalysisInfo_t info_a = 0; cusparseSolveAnalysisInfo_t info_u = 0; cusparseSolveAnalysisInfo_t info_ut = 0; // Only used by ic0. cusparseMatDescr_t descr_l = 0; cusparseMatDescr_t descr_u = 0; if (precond_type != PRECOND_NONE) { THCudaTensor_resize1d(state, val_precond_gpu, nz); THCudaTensor_copy(state, val_precond_gpu, val_gpu); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_a)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); if (precond_type == PRECOND_ILU0) { if (verbose) { std::cout << "PCG: Generating ILU0 preconditioner." << std::endl; } // Generate the Incomplete LU factor H for the matrix A. CHECK_CUSPARSE(cusparseScsrilu0( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, descr, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); // Create info objects for the ILU0 preconditioner. 
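          // With the ILU0 factors (M ~= L * U, unit-diagonal L stored in the
          // same CSR arrays) each PCG iteration applies the preconditioner by
          // two sparse triangular solves, L * y = r then U * z = y, via
          // cusparseScsrsv_solve in the main loop below; descr_l / descr_u
          // describe those two triangles.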
CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(cusparseCreateMatDescr(&descr_l)); CHECK_CUSPARSE(cusparseSetMatType( descr_l, CUSPARSE_MATRIX_TYPE_GENERAL)); CHECK_CUSPARSE(cusparseSetMatIndexBase( descr_l, CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparseSetMatFillMode( descr_l, CUSPARSE_FILL_MODE_LOWER)); CHECK_CUSPARSE(cusparseSetMatDiagType( descr_l, CUSPARSE_DIAG_TYPE_UNIT)); CHECK_CUSPARSE(cusparseCreateMatDescr(&descr_u)); CHECK_CUSPARSE(cusparseSetMatType( descr_u, CUSPARSE_MATRIX_TYPE_GENERAL)); CHECK_CUSPARSE(cusparseSetMatIndexBase( descr_u, CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparseSetMatFillMode( descr_u, CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseSetMatDiagType( descr_u, CUSPARSE_DIAG_TYPE_NON_UNIT)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u)); } else if (precond_type == PRECOND_IC0) { if (verbose) { std::cout << "PCG: Generating IC0 preconditioner." << std::endl; } CHECK_CUSPARSE(cusparseScsric0( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, descr, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a)); CHECK_CUSPARSE(cusparseCreateMatDescr(&descr_u)); CHECK_CUSPARSE(cusparseSetMatType( descr_u, CUSPARSE_MATRIX_TYPE_TRIANGULAR)); CHECK_CUSPARSE(cusparseSetMatIndexBase( descr_u, CUSPARSE_INDEX_BASE_ZERO)); CHECK_CUSPARSE(cusparseSetMatFillMode( descr_u, CUSPARSE_FILL_MODE_UPPER)); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_ut)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, CUSPARSE_OPERATION_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_ut)); CHECK_CUSPARSE(cusparseCreateSolveAnalysisInfo(&info_u)); CHECK_CUSPARSE(cusparseScsrsv_analysis( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, nz, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u)); } else { luaL_error(L, "Incorrect preconType ('none', 'ic0', 'ilu0')"); } } // While we're at it, set the pressure value to a zero value (note we // could also use the previous frame's pressure). This is CG's initial // guess. THCudaTensor_resize1d(state, x_gpu, numel); THCudaTensor_zero(state, x_gpu); // TODO(tompson): Move all these to the start of the function. THCudaTensor_resize1d(state, y_gpu, numel); THCudaTensor_resize1d(state, p_gpu, numel); THCudaTensor_resize1d(state, omega_gpu, numel); THCudaTensor_resize1d(state, zm1_gpu, numel); THCudaTensor_resize1d(state, zm2_gpu, numel); THCudaTensor_resize1d(state, rm2_gpu, numel); // TODO(tompson): Do we need yet another copy of the RHS? // The algorithm we're implementing here is from Matrix Computations, // Golub and Van Loan, Algorithm 10.3.1: int32_t iter = 0; float r_norm_sq1; // r_norm_sq1 is the current residual float r_norm_sq0; // r_norm_sq0 is the previous iteration's residual // Since we start with x = 0, the initial residual is just the norm of the // rhs (i.e. 
residual = ||rhs - A * x|| = ||rhs||) THCudaTensor_resize1d(state, r_gpu, numel); // residual THCudaTensor_copy(state, r_gpu, rhs_gpu); CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(r_gpu), 1, &r_norm_sq1)); if (isnan(r_norm_sq1)) { luaL_error(L, "PCG Error: starting residual is nan!"); } const float one = 1.0f; const float zero = 0.0f; float numerator; float denominator; float beta; float alpha; float nalpha; if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << ": starting residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; } // epsilon ~= 1e-38 (just prevents divide by zero). const float epsilon = std::numeric_limits<float>::min(); while (r_norm_sq1 > tol * tol && iter <= max_iter) { if (precond_type == PRECOND_ILU0) { // Solve M * z_k = r_k CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_l, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_a, DEV_PTR(r_gpu), DEV_PTR(y_gpu))); CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u, DEV_PTR(y_gpu), DEV_PTR(zm1_gpu))); } else if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, CUSPARSE_OPERATION_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_ut, DEV_PTR(r_gpu), DEV_PTR(y_gpu))); CHECK_CUSPARSE(cusparseScsrsv_solve( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, &one, descr_u, DEV_PTR(val_precond_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), info_u, DEV_PTR(y_gpu), DEV_PTR(zm1_gpu))); } iter++; // k = k + 1 // Calculate the next search direction p_k. if (iter == 1) { if (precond_type != PRECOND_NONE) { THCudaTensor_copy(state, p_gpu, zm1_gpu); // p_1 = z_0 } else { THCudaTensor_copy(state, p_gpu, r_gpu); // p_1 = r_0 } } else { if (precond_type != PRECOND_NONE) { // beta_k = r_{k_1}^T * z_{k - 1} / (r_{k-2}^T * z_{k - 2}) CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(zm1_gpu), 1, &numerator)); CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(rm2_gpu), 1, DEV_PTR(zm2_gpu), 1, &denominator)); beta = numerator / clampToEpsilon(denominator, epsilon); // p_k = z_{k - 1} + beta_k * p_{k - 1} CHECK_CUBLAS(cublasSscal( cublas_handle, numel, &beta, DEV_PTR(p_gpu), 1)); CHECK_CUBLAS(cublasSaxpy( cublas_handle, numel, &one, DEV_PTR(zm1_gpu), 1, DEV_PTR(p_gpu), 1)); } else { beta = r_norm_sq1 / clampToEpsilon(r_norm_sq0, epsilon); CHECK_CUBLAS(cublasSscal(cublas_handle, numel, &beta, DEV_PTR(p_gpu), 1)); CHECK_CUBLAS(cublasSaxpy(cublas_handle, numel, &one, DEV_PTR(r_gpu), 1, DEV_PTR(p_gpu), 1)); } } // alpha_k = r_{k-1}^T * z_{k - 1} / (p_k^T * A * p_k) // omega_k = A * p_k. // Recall: cusparseScsrmv is a sparse matrix-vec + const operator. // TODO(tompson): should the sparse descr be descr_u? 
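        // For reference, the complete iteration implemented in this loop is
        // (Golub & Van Loan, Alg. 10.3.1; z = r when no preconditioner):
        //   solve M z_{k-1} = r_{k-1}
        //   beta_k  = (r_{k-1}^T z_{k-1}) / (r_{k-2}^T z_{k-2})   (p_1 = z_0)
        //   p_k     = z_{k-1} + beta_k p_{k-1}
        //   alpha_k = (r_{k-1}^T z_{k-1}) / (p_k^T A p_k)
        //   x_k     = x_{k-1} + alpha_k p_k
        //   r_k     = r_{k-1} - alpha_k A p_k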
CHECK_CUSPARSE(cusparseScsrmv( cusparse_handle, CUSPARSE_OPERATION_NON_TRANSPOSE, numel, numel, nz, &one, descr, DEV_PTR(val_gpu), DEV_INT_PTR(row_gpu), DEV_INT_PTR(col_gpu), DEV_PTR(p_gpu), &zero, DEV_PTR(omega_gpu))); if (precond_type != PRECOND_NONE) { // numerator = r_{k-1}^T * z_{k - 1} CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(zm1_gpu), 1, &numerator)); // denominator = p_k^T * A * p_k = p_k^T * omega_k CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(p_gpu), 1, DEV_PTR(omega_gpu), 1, &denominator)); } else { numerator = r_norm_sq1; CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(p_gpu), 1, DEV_PTR(omega_gpu), 1, &denominator)); } alpha = numerator / clampToEpsilon(denominator, epsilon); // x_k = x_{k - 1} + alpha_k * p_k // Recall: cublasSaxpy(handle, n, alpha, x, incx, y, incy) performs: // --> y [ j ] = alpha × x [ k ] + y [ j ] CHECK_CUBLAS(cublasSaxpy( cublas_handle, numel, &alpha, DEV_PTR(p_gpu), 1, DEV_PTR(x_gpu), 1)); if (precond_type != PRECOND_NONE) { THCudaTensor_copy(state, rm2_gpu, r_gpu); // rm2_gpu = r_gpu THCudaTensor_copy(state, zm2_gpu, zm1_gpu); // zm2_gpu = zm1_gpu } nalpha = -alpha; // According to Shewchuck we should re-init r every 50 iterations. // EDIT(tompson): It doesn't seem to help (removed but in git history). // r_k = r_{k - 1} - alpha_k * A * p_k = r_{k - 1} - alpha_k * omega_k CHECK_CUBLAS(cublasSaxpy(cublas_handle, numel, &nalpha, DEV_PTR(omega_gpu), 1, DEV_PTR(r_gpu), 1)); r_norm_sq0 = r_norm_sq1; // Update previous residual. // Finally, calculate the new residual. CHECK_CUBLAS(cublasSdot(cublas_handle, numel, DEV_PTR(r_gpu), 1, DEV_PTR(r_gpu), 1, &r_norm_sq1)); if (verbose) { std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << " iter " << iter << ": residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; } if (isnan(r_norm_sq1)) { luaL_error(L, "ERROR: r_norm_sq1 is nan!"); } } /* std::cout << "PCG batch " << (ibatch + 1) << " comp " << (icomponent + 1) << " iter " << iter << ": residual " << std::sqrt(r_norm_sq1) << " (tol " << tol << ", precondType = " << PrecondTypeToString(precond_type) << ")" << std::endl; */ if (verbose) { if (iter == max_iter) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " hit max iteration count (" << max_iter << ")" << std::endl; } else if (r_norm_sq1 < tol * tol) { std::cout << "PCG batch " << (ibatch + 1) << " component " << (icomponent + 1) << " residual " << std::sqrt(r_norm_sq1) << " fell below tol (" << tol << ")" << std::endl; } } max_residual = std::max(max_residual, std::sqrt(r_norm_sq1)); // For each separate linear system we're free to choose whatever constant // DC term we want. This has no impact on the velocity update, but will // be important if we include a pressure term when training the convnet // (which doesn't like arbitrary output scales / offsets). const float x_mean = THCudaTensor_meanall(state, x_gpu); // The result is in x_gpu. However, this is the pressure in the reduced // system (with non-fluid cells removed), we need to copy this back to the // original Cartesian (d x w x h) system. THCDeviceTensor<float, 1> dev_x = toDeviceTensor<float, 1>(state, x_gpu); LaunchKernel(L, &copyPressureFromSystem, 1, 1, zsize, ysize, xsize, dev_inds, dev_x, pressure, ibatch, x_mean); // Clean up cusparse. // TODO(tompson): Is there anything else to do? 
CHECK_CUSPARSE(cusparseDestroyMatDescr(descr)); if (precond_type != PRECOND_NONE) { CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_a)); CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_u)); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr_l)); CHECK_CUSPARSE(cusparseDestroyMatDescr(descr_u)); if (precond_type == PRECOND_IC0) { CHECK_CUSPARSE(cusparseDestroySolveAnalysisInfo(info_ut)); } } } // for each connected component } // for each batch. lua_pushnumber(L, max_residual); return 1; } //****************************************************************************** // solveLinearSystemJacobi //****************************************************************************** __global__ void kernel_jacobiIteration( CudaFlagGrid flags, CudaRealGrid div, CudaRealGrid pressure, CudaRealGrid prev_pressure, const int bnd) { int32_t b, chan, k, j, i; if (GetKernelIndices(flags, b, chan, k, j, i)) { return; } if (i < bnd || i > flags.xsize() - 1 - bnd || j < bnd || j > flags.ysize() - 1 - bnd || (flags.is_3d() && (k < bnd || k > flags.zsize() - 1 - bnd))) { pressure(i, j, k, b) = 0; // Zero pressure on the border. return; } if (flags.isObstacle(i, j, k, b)) { pressure(i, j, k, b) = 0; return; } // Otherwise in a fluid or empty cell. // TODO(tompson): Is the logic here correct? Should empty cells be non-zero? const float divergence = div(i, j, k, b); // Get all the neighbors const float pC = prev_pressure(i, j, k, b); float p1 = prev_pressure(i - 1, j, k, b); float p2 = prev_pressure(i + 1, j, k, b); float p3 = prev_pressure(i, j - 1, k, b); float p4 = prev_pressure(i, j + 1, k, b); float p5 = flags.is_3d() ? prev_pressure(i, j, k - 1, b) : 0; float p6 = flags.is_3d() ? prev_pressure(i, j, k + 1, b) : 0; if (flags.isObstacle(i - 1, j, k, b)) { p1 = pC; } if (flags.isObstacle(i + 1, j, k, b)) { p2 = pC; } if (flags.isObstacle(i, j - 1, k, b)) { p3 = pC; } if (flags.isObstacle(i, j + 1, k, b)) { p4 = pC; } if (flags.is_3d() && flags.isObstacle(i, j, k - 1, b)) { p5 = pC; } if (flags.is_3d() && flags.isObstacle(i, j, k + 1, b)) { p6 = pC; } const float denom = flags.is_3d() ? 
6.0f : 4.0f; const float v = (p1 + p2 + p3 + p4 + p5 + p6 + divergence) / denom; pressure(i, j, k, b) = v; } static int tfluids_CudaMain_solveLinearSystemJacobi(lua_State* L) { THCState* state = cutorch_getstate(L); THCudaTensor* tensor_p = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 1, "torch.CudaTensor")); THCudaTensor* tensor_flags = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 2, "torch.CudaTensor")); THCudaTensor* tensor_div = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 3, "torch.CudaTensor")); THCudaTensor* tensor_p_prev = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 4, "torch.CudaTensor")); THCudaTensor* tensor_p_delta = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 5, "torch.CudaTensor")); THCudaTensor* tensor_p_delta_norm = reinterpret_cast<THCudaTensor*>( luaT_checkudata(L, 6, "torch.CudaTensor")); const bool is_3d = static_cast<bool>(lua_toboolean(L, 7)); const float p_tol = static_cast<float>(lua_tonumber(L, 8)); const int64_t max_iter = static_cast<int64_t>(lua_tointeger(L, 9)); const bool verbose = static_cast<int64_t>(lua_toboolean(L, 10)); if (max_iter < 1) { luaL_error(L, "At least 1 iteration is needed (maxIter < 1)"); return 0; } CudaFlagGrid flags = toCudaFlagGrid(state, tensor_flags, is_3d); CudaRealGrid pressure = toCudaRealGrid(state, tensor_p, is_3d); CudaRealGrid pressure_prev = toCudaRealGrid(state, tensor_p_prev, is_3d); CudaRealGrid div = toCudaRealGrid(state, tensor_div, is_3d); // Initialize the pressure to zero. THCudaTensor_zero(state, tensor_p); THCudaTensor_zero(state, tensor_p_prev); // Start with the output of the next iteration going to pressure. CudaRealGrid* cur_pressure = &pressure; CudaRealGrid* cur_pressure_prev = &pressure_prev; const int32_t nbatch = flags.nbatch(); const int64_t xsize = flags.xsize(); const int64_t ysize = flags.ysize(); const int64_t zsize = flags.zsize(); const int64_t numel = xsize * ysize * zsize; float residual; int64_t iter = 0; while (true) { const int32_t bnd = 1; // LaunchKernel args: lua_State, func, domain, args... LaunchKernel(L, &kernel_jacobiIteration, flags, flags, div, *cur_pressure, *cur_pressure_prev, bnd); // Current iteration output is now in cur_pressure (wherever that points). // Calculate the change in pressure up to a sign (i.e. the sign might be // incorrect, but we don't care). THCudaTensor_csub(state, tensor_p_delta, tensor_p, 1.0f, tensor_p_prev); THCudaTensor_resize2d(state, tensor_p_delta, nbatch, numel); // Calculate L2 norm over dim 2. THCudaTensor_norm(state, tensor_p_delta_norm, tensor_p_delta,2.0, 2, 1); // Put the view back. THCudaTensor_resize5d(state, tensor_p_delta, nbatch, 1, zsize, ysize, xsize); residual = THCudaTensor_maxall(state, tensor_p_delta_norm); if (verbose) { std::cout << "Jacobi iteration " << (iter + 1) << ": residual " << residual << std::endl; } // TODO(tompson) calculate divergence and implement divtol (it'll make it // slower). // TODO(tompson): We terminate on the worst case batch is this OK? if (residual < p_tol) { if (verbose) { std::cout << "Jacobi max residual fell below p_tol (" << p_tol << ") (terminating)" << std::endl; } break; } iter++; if (iter >= max_iter) { if (verbose) { std::cout << "Jacobi max iteration count (" << max_iter << ") reached (terminating)" << std::endl; } break; } // We haven't yet terminated. 
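    // Swap the two pressure grids (a plain pointer swap, no device copy): the
    // grid just written becomes prev_pressure (the read buffer) for the next
    // sweep, which recomputes every interior cell as
    //   p(i,j,k) = (sum of the 4 (2D) or 6 (3D) neighbour pressures, with
    //               obstacle neighbours replaced by the centre value,
    //               + div(i,j,k)) / (4 or 6)
    // as done in kernel_jacobiIteration above.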
CudaRealGrid* tmp = cur_pressure; cur_pressure = cur_pressure_prev; cur_pressure_prev = tmp; } // If we terminated with the cur_pressure pointing to the tmp array, then we // have to copy the pressure back into the output tensor. if (cur_pressure == &pressure_prev) { THCudaTensor_copy(state, tensor_p, tensor_p_prev); // p = p_prev } // Note, mean-subtraction is performed on the lua side. lua_pushnumber(L, residual); return 1; } //****************************************************************************** // INIT METHODS //****************************************************************************** static const struct luaL_Reg tfluids_CudaMain__ [] = { {"advectScalar", tfluids_CudaMain_advectScalar}, {"advectVel", tfluids_CudaMain_advectVel}, {"setWallBcsForward", tfluids_CudaMain_setWallBcsForward}, {"vorticityConfinement", tfluids_CudaMain_vorticityConfinement}, {"addBuoyancy", tfluids_CudaMain_addBuoyancy}, {"addGravity", tfluids_CudaMain_addGravity}, {"velocityUpdateForward", tfluids_CudaMain_velocityUpdateForward}, {"velocityUpdateBackward", tfluids_CudaMain_velocityUpdateBackward}, {"velocityDivergenceForward", tfluids_CudaMain_velocityDivergenceForward}, {"velocityDivergenceBackward", tfluids_CudaMain_velocityDivergenceBackward}, {"emptyDomain", tfluids_CudaMain_emptyDomain}, {"flagsToOccupancy", tfluids_CudaMain_flagsToOccupancy}, {"solveLinearSystemPCG", tfluids_CudaMain_solveLinearSystemPCG}, {"solveLinearSystemJacobi", tfluids_CudaMain_solveLinearSystemJacobi}, {"volumetricUpSamplingNearestForward", tfluids_CudaMain_volumetricUpSamplingNearestForward}, {"volumetricUpSamplingNearestBackward", tfluids_CudaMain_volumetricUpSamplingNearestBackward}, {"signedDistanceField", tfluids_CudaMain_signedDistanceField}, {NULL, NULL} // NOLINT }; const struct luaL_Reg* tfluids_CudaMain_getMethodsTable() { return tfluids_CudaMain__; } void tfluids_CudaMain_init(lua_State* L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, tfluids_CudaMain__, "tfluids"); }
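// For reference, the argument order expected by the two solver entry points
// registered above (as read off the Lua stack in this file; how they are
// exposed on the Lua side depends on the tfluids wrapper):
//   solveLinearSystemPCG(tmpTable, p, flags, div, is3D,
//       precondType ('none' | 'ilu0' | 'ic0'), tol, maxIter, verbose)
//     -> returns the maximum residual over all batches / components.
//   solveLinearSystemJacobi(p, flags, div, pPrev, pDelta, pDeltaNorm,
//       is3D, pTol, maxIter, verbose)
//     -> returns the residual of the final iteration.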
4f342f328c80e9e4e3a376fafbc7164d9009dc5b.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <unistd.h> #include <sys/time.h> #include "srad.h" // includes, project #include <hip/hip_runtime.h> // includes, kernels #include "srad_kernel.hip" #include "cudaio.h" #include "timer.h" #include "cuhelper.h" #define R1 0 //y1 position of the speckle #define R2 127 //y2 position of the speckle #define C1 0 //x1 position of the speckle #define C2 127 //x2 position of the speckle #define LAMBDA 0.5 //Lambda value #define NITER 2 //number of iterations static long size, size_I, size_R; static unsigned ticks_pre, ticks_cpu, ticks_gpu, ticks_post; static void calc_matrix(cuio_ptr_t ptr_J, cuio_ptr_t ptr_C, cuio_ptr_t ptr_C_E, cuio_ptr_t ptr_C_W, cuio_ptr_t ptr_C_N, cuio_ptr_t ptr_C_S) { float sum, sum2; float meanROI, varROI, q0sqr; init_tickcount(); sum = 0; sum2 = 0; for (long i = R1; i <= R2; i++) { for (long j = C1; j <= C2; j++) { float tmp = ((float *)ptr_J.ptr_h)[i * size + j]; sum += tmp; sum2 += tmp * tmp; } } ticks_cpu += get_tickcount(); init_tickcount(); meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI * meanROI; q0sqr = varROI / (meanROI * meanROI); //Currently the input size must be divided by 16 - the block size long block_x = size / (long)BLOCK_SIZE; long block_y = size / (long)BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory cuio_memcpy_h2d(&ptr_J); ticks_pre += get_tickcount(); init_tickcount(); //Run kernels hipLaunchKernelGGL(( srad_cuda_1), dim3(dimGrid), dim3(dimBlock), 0, 0, CUIO_FLOATS_D(ptr_C_E), CUIO_FLOATS_D(ptr_C_W), CUIO_FLOATS_D(ptr_C_N), CUIO_FLOATS_D(ptr_C_S), CUIO_FLOATS_D(ptr_J), CUIO_FLOATS_D(ptr_C), size, size, q0sqr); hipLaunchKernelGGL(( srad_cuda_2), dim3(dimGrid), dim3(dimBlock), 0, 0, CUIO_FLOATS_D(ptr_C_E), CUIO_FLOATS_D(ptr_C_W), CUIO_FLOATS_D(ptr_C_N), CUIO_FLOATS_D(ptr_C_S), CUIO_FLOATS_D(ptr_J), CUIO_FLOATS_D(ptr_C), size, size, LAMBDA, q0sqr); CUDA_CALL_SAFE(hipDeviceSynchronize()); ticks_gpu += get_tickcount(); init_tickcount(); //Copy data from device memory to main memory cuio_memcpy_d2h(&ptr_J); ticks_post += get_tickcount(); } static void confer_load(FILE *fp, const char *fpath, void *ctx) { char buf[1024]; long *psize = (long *)ctx; if (fgets(buf, 1024, fp) == NULL) { fprintf(stderr, "cannot get # of boxes: %s\n", fpath); exit(2); } if (sscanf(buf, "%ld", psize) != 1) { fprintf(stderr, "invalid format: %s\n", fpath); exit(3); } } int main(int argc, char *argv[]) { cuio_ptr_t ptr_J, ptr_C, ptr_C_E, ptr_C_W, ptr_C_N, ptr_C_S; char *folder; printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); if (argc != 2) { fprintf(stderr, "Usage: %s <folder>\n", argv[0]); exit(1); } folder = argv[1]; init_tickcount(); cuio_init(CUIO_TYPE_NONE, folder); cuio_load_conf(confer_load, &size); size_I = size * size; size_R = (R2 - R1 + 1) * (C2 - C1 + 1); ptr_J = cuio_load_floats("matrix.mem", size_I, CUIO_MODE_READWRITE); ptr_C = cuio_load_floats("matrix.C", size_I, CUIO_MODE_WRITEONLY); ptr_C_E = cuio_load_floats("matrix.C.E", size_I, CUIO_MODE_WRITEONLY); ptr_C_W = cuio_load_floats("matrix.C.W", size_I, CUIO_MODE_WRITEONLY); ptr_C_N = cuio_load_floats("matrix.C.N", size_I, CUIO_MODE_WRITEONLY); ptr_C_S = cuio_load_floats("matrix.C.S", size_I, CUIO_MODE_WRITEONLY); ticks_pre += get_tickcount(); printf("Start the SRAD main loop\n"); for (int iter = 0; iter < NITER; iter++) { calc_matrix(ptr_J, ptr_C, 
            ptr_C_E, ptr_C_W, ptr_C_N, ptr_C_S);
    }
    init_tickcount();
    cuio_unload_floats("matrix.mem", &ptr_J);
    cuio_free_mem(&ptr_C);
    cuio_free_mem(&ptr_C_E);
    cuio_free_mem(&ptr_C_W);
    cuio_free_mem(&ptr_C_N);
    cuio_free_mem(&ptr_C_S);
    ticks_post += get_tickcount();
    printf("Computation Done\n");
    printf("pre time(us): %u\n", ticks_pre);
    printf("kernel time(us): %u(gpu:%u)\n", ticks_cpu + ticks_gpu, ticks_gpu);
    printf("post time(us): %u\n", ticks_post);
    return 0;
}
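// Note on calc_matrix() above: the host loop computes the speckle statistics
//   meanROI = (1/size_R) * sum_{ROI} J
//   varROI  = (1/size_R) * sum_{ROI} J^2 - meanROI^2
//   q0sqr   = varROI / meanROI^2
// over the (R2-R1+1) x (C2-C1+1) = 128 x 128 ROI (size_R = 16384), and q0sqr
// is passed to both SRAD kernels. The launch uses a (size/BLOCK_SIZE)^2 grid
// of BLOCK_SIZE x BLOCK_SIZE blocks, so the input size must be a multiple of
// BLOCK_SIZE.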
4f342f328c80e9e4e3a376fafbc7164d9009dc5b.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <unistd.h> #include <sys/time.h> #include "srad.h" // includes, project #include <cuda.h> // includes, kernels #include "srad_kernel.cu" #include "cudaio.h" #include "timer.h" #include "cuhelper.h" #define R1 0 //y1 position of the speckle #define R2 127 //y2 position of the speckle #define C1 0 //x1 position of the speckle #define C2 127 //x2 position of the speckle #define LAMBDA 0.5 //Lambda value #define NITER 2 //number of iterations static long size, size_I, size_R; static unsigned ticks_pre, ticks_cpu, ticks_gpu, ticks_post; static void calc_matrix(cuio_ptr_t ptr_J, cuio_ptr_t ptr_C, cuio_ptr_t ptr_C_E, cuio_ptr_t ptr_C_W, cuio_ptr_t ptr_C_N, cuio_ptr_t ptr_C_S) { float sum, sum2; float meanROI, varROI, q0sqr; init_tickcount(); sum = 0; sum2 = 0; for (long i = R1; i <= R2; i++) { for (long j = C1; j <= C2; j++) { float tmp = ((float *)ptr_J.ptr_h)[i * size + j]; sum += tmp; sum2 += tmp * tmp; } } ticks_cpu += get_tickcount(); init_tickcount(); meanROI = sum / size_R; varROI = (sum2 / size_R) - meanROI * meanROI; q0sqr = varROI / (meanROI * meanROI); //Currently the input size must be divided by 16 - the block size long block_x = size / (long)BLOCK_SIZE; long block_y = size / (long)BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(block_x , block_y); //Copy data from main memory to device memory cuio_memcpy_h2d(&ptr_J); ticks_pre += get_tickcount(); init_tickcount(); //Run kernels srad_cuda_1<<<dimGrid, dimBlock>>>(CUIO_FLOATS_D(ptr_C_E), CUIO_FLOATS_D(ptr_C_W), CUIO_FLOATS_D(ptr_C_N), CUIO_FLOATS_D(ptr_C_S), CUIO_FLOATS_D(ptr_J), CUIO_FLOATS_D(ptr_C), size, size, q0sqr); srad_cuda_2<<<dimGrid, dimBlock>>>(CUIO_FLOATS_D(ptr_C_E), CUIO_FLOATS_D(ptr_C_W), CUIO_FLOATS_D(ptr_C_N), CUIO_FLOATS_D(ptr_C_S), CUIO_FLOATS_D(ptr_J), CUIO_FLOATS_D(ptr_C), size, size, LAMBDA, q0sqr); CUDA_CALL_SAFE(cudaDeviceSynchronize()); ticks_gpu += get_tickcount(); init_tickcount(); //Copy data from device memory to main memory cuio_memcpy_d2h(&ptr_J); ticks_post += get_tickcount(); } static void confer_load(FILE *fp, const char *fpath, void *ctx) { char buf[1024]; long *psize = (long *)ctx; if (fgets(buf, 1024, fp) == NULL) { fprintf(stderr, "cannot get # of boxes: %s\n", fpath); exit(2); } if (sscanf(buf, "%ld", psize) != 1) { fprintf(stderr, "invalid format: %s\n", fpath); exit(3); } } int main(int argc, char *argv[]) { cuio_ptr_t ptr_J, ptr_C, ptr_C_E, ptr_C_W, ptr_C_N, ptr_C_S; char *folder; printf("WG size of kernel = %d X %d\n", BLOCK_SIZE, BLOCK_SIZE); if (argc != 2) { fprintf(stderr, "Usage: %s <folder>\n", argv[0]); exit(1); } folder = argv[1]; init_tickcount(); cuio_init(CUIO_TYPE_NONE, folder); cuio_load_conf(confer_load, &size); size_I = size * size; size_R = (R2 - R1 + 1) * (C2 - C1 + 1); ptr_J = cuio_load_floats("matrix.mem", size_I, CUIO_MODE_READWRITE); ptr_C = cuio_load_floats("matrix.C", size_I, CUIO_MODE_WRITEONLY); ptr_C_E = cuio_load_floats("matrix.C.E", size_I, CUIO_MODE_WRITEONLY); ptr_C_W = cuio_load_floats("matrix.C.W", size_I, CUIO_MODE_WRITEONLY); ptr_C_N = cuio_load_floats("matrix.C.N", size_I, CUIO_MODE_WRITEONLY); ptr_C_S = cuio_load_floats("matrix.C.S", size_I, CUIO_MODE_WRITEONLY); ticks_pre += get_tickcount(); printf("Start the SRAD main loop\n"); for (int iter = 0; iter < NITER; iter++) { calc_matrix(ptr_J, ptr_C, ptr_C_E, ptr_C_W, ptr_C_N, ptr_C_S); } init_tickcount(); cuio_unload_floats("matrix.mem", &ptr_J); cuio_free_mem(&ptr_C); 
    cuio_free_mem(&ptr_C_E);
    cuio_free_mem(&ptr_C_W);
    cuio_free_mem(&ptr_C_N);
    cuio_free_mem(&ptr_C_S);
    ticks_post += get_tickcount();
    printf("Computation Done\n");
    printf("pre time(us): %u\n", ticks_pre);
    printf("kernel time(us): %u(gpu:%u)\n", ticks_cpu + ticks_gpu, ticks_gpu);
    printf("post time(us): %u\n", ticks_post);
    return 0;
}
1cffc74a29f953b5485f8f0b9eb5acef03934c31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/hip/HIPContext.h> #include <c10/util/complex.h> #include <stdio.h> #include <torch/extension.h> #include <torch/python.h> #include <ATen/native/hip/block_reduce.cuh> #include <THH/THHAtomics.cuh> #define CHECK_DEVICE(x) \ TORCH_CHECK(x.device().type() == torch::kCUDA, #x " must be on CUDA") #define CHECK_SHAPE(x, ...) \ TORCH_CHECK( \ x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \ #x " must have shape (" #__VA_ARGS__ ")") #define REDUCE_THREADS 128 #define REDUCE_THREADS_FWD 32 #define REDUCE_THREADS_A 512 #define REDUCE_THREADS_B 64 #define REDUCE_THREADS_C 1024 template <typename T, size_t N> using CudaAcsr = at::GenericPackedTensorAccessor<T, N, at::RestrictPtrTraits, int32_t>; template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_forward_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> out, int N, int H ) { __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_b_elem_ch[sizeof(scalar_t)]; scalar_t* shared_b_elem = (scalar_t*)&shared_b_elem_ch; int ic = blockIdx.x; int l = blockIdx.y; int qh = blockIdx.z; int q = qh / H; int h = qh % H; if (threadIdx.x == 0) {shared_b_elem[0] = b[q][l][h];} __syncthreads(); scalar_t val = scalar_t(0.0); for (int i = threadIdx.x; i < N; i += NUM_THREADS) { val += a[ic][q][h][i] / (shared_b_elem[0] - c[q][i]); } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) {out[ic][q][l][h] = val;} } template <typename T> torch::Tensor kernel_coefficient_forward(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out) { CHECK_DEVICE(a); CHECK_DEVICE(b); CHECK_DEVICE(c); const auto IC = a.size(0); const auto Q = a.size(1); // num heads const auto H = a.size(2); // input dim const auto N = a.size(3); // hidden dim const auto L = b.size(1); // seq length CHECK_SHAPE(b, Q, L, H); CHECK_SHAPE(c, Q, N); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); using scalar_t = c10::complex<T>; const auto a_p = a.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto b_p = b.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto c_p = c.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); auto out_p = out.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); dim3 grid(IC, L, Q * H); dim3 block(REDUCE_THREADS_FWD); hipLaunchKernelGGL(( kernel_coefficient_forward_kernel<REDUCE_THREADS_FWD, scalar_t>) , dim3(grid), dim3(block), 0, stream, a_p, b_p, c_p, out_p, N, H); return out; } template <int NUM_THREADS, typename scalar_t> __global__ void _abc_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 4> da, CudaAcsr<scalar_t, 3> db, CudaAcsr<scalar_t, 2> dc, int Q, int IC, int L, int H, int N, int G /* = IC * H * N + L * H + N */) { __shared__ char sh_array_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* sh_array = (scalar_t*)&sh_array_ch; __shared__ char sh_elem_ch[sizeof(scalar_t)]; scalar_t* sh_elem = (scalar_t*)&sh_elem_ch; /* Here, G = IC * H * N + L * H + N In total, there are G * Q jobs (blockIdx.x, blockIdx.x) to prepare an idx in [0, G - 1] blockIdx.z = Q */ int my_idx = blockIdx.x * L + blockIdx.y; /* [0, N - 1] for dc; let sa = N [sa, sa + IC * H * N - 1] for da; let sb = sa + IC * H * N [sb, sb + L * H - 
1] for db */ if (my_idx >= G) return; int sa = N; int sb = sa + IC * H * N; scalar_t val = scalar_t(0.0); int q = blockIdx.z; int tot; if (my_idx < sa) { // dc = sum_{ic, h, l} a/[(b-c)*]^2 int n = my_idx % N; tot = IC * H * L; if (threadIdx.x == 0) { sh_elem[0] = c[q][n]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int hl = i / IC; int h = hl % H; int l = hl / H; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][l][h] - sh_elem[0]); val += dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { dc[q][n] = val; } } else if (my_idx < sb) { // da = sum_{l} 1/[(b-c)*] tot = L; my_idx -= sa; int ic = my_idx % IC; int h_n = my_idx / IC; int h = h_n % H; int n = h_n / H; if (threadIdx.x == 0) { sh_elem[0] = c[q][n]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][i][h] - sh_elem[0]); val += dout[ic][q][i][h] * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { da[ic][q][h][n] = val; } } else { // db = sum_{ic, n} -a/[(b-c)*]^2 tot = IC * N; my_idx -= sb; int l = my_idx % L; int h = my_idx / L; if (threadIdx.x == 0) { sh_elem[0] = b[q][l][h]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int n = i / IC; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(sh_elem[0] - c[q][n]); val -= dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { db[q][l][h] = val; } } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_a_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 4> da, int L, int H) { // da = sum_{l} 1/[(b-c)*] __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_c_elem_ch[sizeof(scalar_t)]; scalar_t* shared_c_elem = (scalar_t*)&shared_c_elem_ch; int ic = blockIdx.x; int q = blockIdx.y; int hn = blockIdx.z; int h = hn % H; int n = hn / H; if (threadIdx.x == 0) { shared_c_elem[0] = c[q][n]; } __syncthreads(); scalar_t val = scalar_t(0.0); for (int i = threadIdx.x; i < L; i += NUM_THREADS) { scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][i][h] - shared_c_elem[0]); val += dout[ic][q][i][h] * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { da[ic][q][h][n] = val; } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_b_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 3> db, int N, int IC) { // db = sum_{ic, n} -a/[(b-c)*]^2 __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_b_elem_ch[sizeof(scalar_t)]; scalar_t* shared_b_elem = (scalar_t*)&shared_b_elem_ch; int q = blockIdx.x; int l = blockIdx.y; int h = blockIdx.z; scalar_t val = scalar_t(0.0); if (threadIdx.x == 0) { shared_b_elem[0] = b[q][l][h]; } __syncthreads(); int tot 
= IC * N; for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int n = i / IC; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(shared_b_elem[0] - c[q][n]); val -= dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { db[q][l][h] = val; } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_c_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 2> dc, int IC, int H, int L) { // dc = sum_{ic, h, l} a/[(b-c)*]^2 __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_c_elem_ch[sizeof(scalar_t)]; scalar_t* shared_c_elem = (scalar_t*)&shared_c_elem_ch; int q = blockIdx.x; int n = blockIdx.y; scalar_t val = scalar_t(0.0); int tot = IC * H * L; if (threadIdx.x == 0) { shared_c_elem[0] = c[q][n]; } __syncthreads(); // scalar_t c_kh = 0; // Kahan sum for precision for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int hl = i / IC; int h = hl % H; int l = hl / H; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][l][h] - shared_c_elem[0]); /* scalar_t elem = dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv - c_kh; scalar_t t_kh = val + elem; c_kh = (t_kh - val) - elem; val = t_kh; */ val += dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { dc[q][n] = val; } } template <typename T> std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc) { CHECK_DEVICE(a); CHECK_DEVICE(b); CHECK_DEVICE(c); CHECK_DEVICE(dout); const auto IC = a.size(0); const auto Q = a.size(1); // num heads const auto H = a.size(2); // input dim const auto N = a.size(3); // hidden dim const auto L = b.size(1); // seq length CHECK_SHAPE(b, Q, L, H); CHECK_SHAPE(c, Q, N); CHECK_SHAPE(dout, IC, Q, L, H); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); using scalar_t = c10::complex<T>; const auto a_p = a.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto b_p = b.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto c_p = c.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto dout_p = dout.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); auto da_p = da.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); auto db_p = db.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto dc_p = dc.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); dim3 grid_da(IC, Q, H * N); dim3 blocka(REDUCE_THREADS_A); dim3 grid_db(Q, L, H); dim3 blockb(REDUCE_THREADS_B); dim3 grid_dc(Q, N); dim3 blockc(REDUCE_THREADS_C); hipLaunchKernelGGL(( kernel_coefficient_backward_a_kernel<REDUCE_THREADS_A, scalar_t>) , dim3(grid_da), dim3(blocka), 0, stream, a_p, b_p, c_p, dout_p, da_p, L, H); hipLaunchKernelGGL(( kernel_coefficient_backward_b_kernel<REDUCE_THREADS_B, scalar_t>) , dim3(grid_db), dim3(blockb), 0, stream, a_p, b_p, c_p, dout_p, db_p, N, IC); hipLaunchKernelGGL(( kernel_coefficient_backward_c_kernel<REDUCE_THREADS_C, scalar_t>) , dim3(grid_dc), dim3(blockc), 0, stream, a_p, b_p, 
c_p, dout_p, dc_p, IC, H, L); /*int G = IC * H * N + L * H + N; dim3 grid((G + L - 1) / L, L, Q); dim3 block(REDUCE_THREADS); kernel_coefficient_backward_abc_kernel<REDUCE_THREADS, scalar_t> <<<grid, block, 0, stream>>>( a_p, b_p, c_p, dout_p, da_p, db_p, dc_p, Q, IC, L, H, N, G);*/ return std::make_tuple(da, db, dc); } // Instantiate for floating point types template torch::Tensor kernel_coefficient_forward<float>(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out); template torch::Tensor kernel_coefficient_forward<double>(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out); template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward<float>( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc); template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward<double>( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc); // int mlp_fp<float>( // float* X, // int input_features, // int batch_size, // float** WPtr, // float** BPtr, // int num_layers, // int* output_features, // float* Y, // float* reserved_space, // float* reserved_activations, // uint8_t* reserved_mask, // void* lt_workspace, // float p); // PYBIND11_MODULE(ssm_kernel_coefficient_binding, m) { // m.def("kernel_coefficient_forward_float", &kernel_coefficient_forward<float>); // m.def( // "kernel_coefficient_forward_double", &kernel_coefficient_forward<double>); // m.def( // "kernel_coefficient_backward_float", &kernel_coefficient_backward<float>); // m.def( // "kernel_coefficient_backward_double", // &kernel_coefficient_backward<double>); // }
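// For reference, with complex a[IC][Q][H][N], b[Q][L][H], c[Q][N] the kernels
// above compute (indices written out; * denotes complex conjugation):
//   forward:  out[ic][q][l][h] = sum_n a[ic][q][h][n] / (b[q][l][h] - c[q][n])
//   backward (given dout[IC][Q][L][H]):
//     da[ic][q][h][n] =  sum_l      dout[ic][q][l][h] / (b[q][l][h] - c[q][n])*
//     db[q][l][h]     = -sum_{ic,n} dout[ic][q][l][h] * a[ic][q][h][n]
//                         / ((b[q][l][h] - c[q][n])*)^2
//     dc[q][n]        =  sum_{ic,h,l} dout[ic][q][l][h] * a[ic][q][h][n]
//                         / ((b[q][l][h] - c[q][n])*)^2
// The fused _abc_kernel above packs all three reductions into
// G = IC*H*N + L*H + N block jobs, but it is not called by
// kernel_coefficient_backward here (its launch appears only in the
// commented-out block).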
1cffc74a29f953b5485f8f0b9eb5acef03934c31.cu
#include <ATen/cuda/CUDAContext.h> #include <c10/util/complex.h> #include <stdio.h> #include <torch/extension.h> #include <torch/python.h> #include <ATen/native/cuda/block_reduce.cuh> #include <THC/THCAtomics.cuh> #define CHECK_DEVICE(x) \ TORCH_CHECK(x.device().type() == torch::kCUDA, #x " must be on CUDA") #define CHECK_SHAPE(x, ...) \ TORCH_CHECK( \ x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \ #x " must have shape (" #__VA_ARGS__ ")") #define REDUCE_THREADS 128 #define REDUCE_THREADS_FWD 32 #define REDUCE_THREADS_A 512 #define REDUCE_THREADS_B 64 #define REDUCE_THREADS_C 1024 template <typename T, size_t N> using CudaAcsr = at::GenericPackedTensorAccessor<T, N, at::RestrictPtrTraits, int32_t>; template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_forward_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> out, int N, int H ) { __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_b_elem_ch[sizeof(scalar_t)]; scalar_t* shared_b_elem = (scalar_t*)&shared_b_elem_ch; int ic = blockIdx.x; int l = blockIdx.y; int qh = blockIdx.z; int q = qh / H; int h = qh % H; if (threadIdx.x == 0) {shared_b_elem[0] = b[q][l][h];} __syncthreads(); scalar_t val = scalar_t(0.0); for (int i = threadIdx.x; i < N; i += NUM_THREADS) { val += a[ic][q][h][i] / (shared_b_elem[0] - c[q][i]); } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) {out[ic][q][l][h] = val;} } template <typename T> torch::Tensor kernel_coefficient_forward(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out) { CHECK_DEVICE(a); CHECK_DEVICE(b); CHECK_DEVICE(c); const auto IC = a.size(0); const auto Q = a.size(1); // num heads const auto H = a.size(2); // input dim const auto N = a.size(3); // hidden dim const auto L = b.size(1); // seq length CHECK_SHAPE(b, Q, L, H); CHECK_SHAPE(c, Q, N); auto stream = at::cuda::getCurrentCUDAStream(); using scalar_t = c10::complex<T>; const auto a_p = a.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto b_p = b.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto c_p = c.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); auto out_p = out.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); dim3 grid(IC, L, Q * H); dim3 block(REDUCE_THREADS_FWD); kernel_coefficient_forward_kernel<REDUCE_THREADS_FWD, scalar_t> <<<grid, block, 0, stream>>>(a_p, b_p, c_p, out_p, N, H); return out; } template <int NUM_THREADS, typename scalar_t> __global__ void _abc_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 4> da, CudaAcsr<scalar_t, 3> db, CudaAcsr<scalar_t, 2> dc, int Q, int IC, int L, int H, int N, int G /* = IC * H * N + L * H + N */) { __shared__ char sh_array_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* sh_array = (scalar_t*)&sh_array_ch; __shared__ char sh_elem_ch[sizeof(scalar_t)]; scalar_t* sh_elem = (scalar_t*)&sh_elem_ch; /* Here, G = IC * H * N + L * H + N In total, there are G * Q jobs (blockIdx.x, blockIdx.x) to prepare an idx in [0, G - 1] blockIdx.z = Q */ int my_idx = blockIdx.x * L + blockIdx.y; /* [0, N - 1] for dc; let sa = N [sa, sa + IC * H * N - 1] for da; let sb = sa + IC * H * N [sb, sb + L * H - 1] for db */ if (my_idx >= G) return; int sa = N; int sb = sa + IC * H * N; scalar_t val = scalar_t(0.0); int q = blockIdx.z; int 
tot; if (my_idx < sa) { // dc = sum_{ic, h, l} a/[(b-c)*]^2 int n = my_idx % N; tot = IC * H * L; if (threadIdx.x == 0) { sh_elem[0] = c[q][n]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int hl = i / IC; int h = hl % H; int l = hl / H; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][l][h] - sh_elem[0]); val += dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { dc[q][n] = val; } } else if (my_idx < sb) { // da = sum_{l} 1/[(b-c)*] tot = L; my_idx -= sa; int ic = my_idx % IC; int h_n = my_idx / IC; int h = h_n % H; int n = h_n / H; if (threadIdx.x == 0) { sh_elem[0] = c[q][n]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][i][h] - sh_elem[0]); val += dout[ic][q][i][h] * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { da[ic][q][h][n] = val; } } else { // db = sum_{ic, n} -a/[(b-c)*]^2 tot = IC * N; my_idx -= sb; int l = my_idx % L; int h = my_idx / L; if (threadIdx.x == 0) { sh_elem[0] = b[q][l][h]; } __syncthreads(); for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int n = i / IC; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(sh_elem[0] - c[q][n]); val -= dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } sh_array[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, sh_array); if (threadIdx.x == 0) { db[q][l][h] = val; } } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_a_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 4> da, int L, int H) { // da = sum_{l} 1/[(b-c)*] __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_c_elem_ch[sizeof(scalar_t)]; scalar_t* shared_c_elem = (scalar_t*)&shared_c_elem_ch; int ic = blockIdx.x; int q = blockIdx.y; int hn = blockIdx.z; int h = hn % H; int n = hn / H; if (threadIdx.x == 0) { shared_c_elem[0] = c[q][n]; } __syncthreads(); scalar_t val = scalar_t(0.0); for (int i = threadIdx.x; i < L; i += NUM_THREADS) { scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][i][h] - shared_c_elem[0]); val += dout[ic][q][i][h] * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { da[ic][q][h][n] = val; } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_b_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 3> db, int N, int IC) { // db = sum_{ic, n} -a/[(b-c)*]^2 __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_b_elem_ch[sizeof(scalar_t)]; scalar_t* shared_b_elem = (scalar_t*)&shared_b_elem_ch; int q = blockIdx.x; int l = blockIdx.y; int h = blockIdx.z; scalar_t val = scalar_t(0.0); if (threadIdx.x == 0) { shared_b_elem[0] = b[q][l][h]; } __syncthreads(); int tot = IC * N; for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int n = i / IC; scalar_t diff_conj_inv = 
scalar_t(1.0) / std::conj(shared_b_elem[0] - c[q][n]); val -= dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { db[q][l][h] = val; } } template <int NUM_THREADS, typename scalar_t> __global__ void kernel_coefficient_backward_c_kernel( CudaAcsr<scalar_t, 4> a, CudaAcsr<scalar_t, 3> b, CudaAcsr<scalar_t, 2> c, CudaAcsr<scalar_t, 4> dout, CudaAcsr<scalar_t, 2> dc, int IC, int H, int L) { // dc = sum_{ic, h, l} a/[(b-c)*]^2 __shared__ char shared_ch[NUM_THREADS * sizeof(scalar_t)]; scalar_t* shared = (scalar_t*)&shared_ch; __shared__ char shared_c_elem_ch[sizeof(scalar_t)]; scalar_t* shared_c_elem = (scalar_t*)&shared_c_elem_ch; int q = blockIdx.x; int n = blockIdx.y; scalar_t val = scalar_t(0.0); int tot = IC * H * L; if (threadIdx.x == 0) { shared_c_elem[0] = c[q][n]; } __syncthreads(); // scalar_t c_kh = 0; // Kahan sum for precision for (int i = threadIdx.x; i < tot; i += NUM_THREADS) { int ic = i % IC; int hl = i / IC; int h = hl % H; int l = hl / H; scalar_t diff_conj_inv = scalar_t(1.0) / std::conj(b[q][l][h] - shared_c_elem[0]); /* scalar_t elem = dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv - c_kh; scalar_t t_kh = val + elem; c_kh = (t_kh - val) - elem; val = t_kh; */ val += dout[ic][q][l][h] * a[ic][q][h][n] * diff_conj_inv * diff_conj_inv; } shared[threadIdx.x] = val; __syncthreads(); val = at::native::cuda_utils::BlockReduceSum<scalar_t>(val, shared); if (threadIdx.x == 0) { dc[q][n] = val; } } template <typename T> std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc) { CHECK_DEVICE(a); CHECK_DEVICE(b); CHECK_DEVICE(c); CHECK_DEVICE(dout); const auto IC = a.size(0); const auto Q = a.size(1); // num heads const auto H = a.size(2); // input dim const auto N = a.size(3); // hidden dim const auto L = b.size(1); // seq length CHECK_SHAPE(b, Q, L, H); CHECK_SHAPE(c, Q, N); CHECK_SHAPE(dout, IC, Q, L, H); auto stream = at::cuda::getCurrentCUDAStream(); using scalar_t = c10::complex<T>; const auto a_p = a.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto b_p = b.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto c_p = c.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto dout_p = dout.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); auto da_p = da.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); auto db_p = db.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto dc_p = dc.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); dim3 grid_da(IC, Q, H * N); dim3 blocka(REDUCE_THREADS_A); dim3 grid_db(Q, L, H); dim3 blockb(REDUCE_THREADS_B); dim3 grid_dc(Q, N); dim3 blockc(REDUCE_THREADS_C); kernel_coefficient_backward_a_kernel<REDUCE_THREADS_A, scalar_t> <<<grid_da, blocka, 0, stream>>>(a_p, b_p, c_p, dout_p, da_p, L, H); kernel_coefficient_backward_b_kernel<REDUCE_THREADS_B, scalar_t> <<<grid_db, blockb, 0, stream>>>(a_p, b_p, c_p, dout_p, db_p, N, IC); kernel_coefficient_backward_c_kernel<REDUCE_THREADS_C, scalar_t> <<<grid_dc, blockc, 0, stream>>>(a_p, b_p, c_p, dout_p, dc_p, IC, H, L); /*int G = IC * H * N + L * H + N; dim3 grid((G + L - 1) / L, L, Q); dim3 block(REDUCE_THREADS); kernel_coefficient_backward_abc_kernel<REDUCE_THREADS, scalar_t> <<<grid, block, 0, stream>>>( a_p, b_p, 
c_p, dout_p, da_p, db_p, dc_p, Q, IC, L, H, N, G);*/ return std::make_tuple(da, db, dc); } // Instantiate for floating point types template torch::Tensor kernel_coefficient_forward<float>(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out); template torch::Tensor kernel_coefficient_forward<double>(torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor out); template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward<float>( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc); template std::tuple<torch::Tensor, torch::Tensor, torch::Tensor> kernel_coefficient_backward<double>( torch::Tensor a, torch::Tensor b, torch::Tensor c, torch::Tensor dout, torch::Tensor da, torch::Tensor db, torch::Tensor dc); // int mlp_fp<float>( // float* X, // int input_features, // int batch_size, // float** WPtr, // float** BPtr, // int num_layers, // int* output_features, // float* Y, // float* reserved_space, // float* reserved_activations, // uint8_t* reserved_mask, // void* lt_workspace, // float p); // PYBIND11_MODULE(ssm_kernel_coefficient_binding, m) { // m.def("kernel_coefficient_forward_float", &kernel_coefficient_forward<float>); // m.def( // "kernel_coefficient_forward_double", &kernel_coefficient_forward<double>); // m.def( // "kernel_coefficient_backward_float", &kernel_coefficient_backward<float>); // m.def( // "kernel_coefficient_backward_double", // &kernel_coefficient_backward<double>); // }
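The pybind11 registration at the end of this file is commented out. If it were enabled in the same translation unit, it would look roughly like the sketch below; the module and function names are taken from that commented-out block, and everything else about how the extension is built is an assumption not covered by this file.

// Minimal sketch of enabling the commented-out bindings above; assumes it is
// compiled in the same translation unit as the kernel_coefficient_* templates
// and their explicit instantiations (torch/extension.h is already included).
PYBIND11_MODULE(ssm_kernel_coefficient_binding, m) {
  m.def("kernel_coefficient_forward_float", &kernel_coefficient_forward<float>);
  m.def("kernel_coefficient_forward_double", &kernel_coefficient_forward<double>);
  m.def("kernel_coefficient_backward_float", &kernel_coefficient_backward<float>);
  m.def("kernel_coefficient_backward_double", &kernel_coefficient_backward<double>);
}

On the Python side such a module would then be importable under that name, with the caller responsible for pre-allocating out, da, db, and dc with the shapes enforced by CHECK_SHAPE, since the functions above write into and return the tensors they are given.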
ffae6ebb5cabcdab4726e81b6f68eb8276c9d32f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @author Mark Gates @generated from zgemv_fermi.cu normal z -> c, Fri Jan 30 19:00:10 2015 */ #include "common_magma.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #define BLK_X 128 #define BLK_Y 128 /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This simple implementation loads x directly, relying on the cache, * without using shared memory. */ __global__ void cgemvn_kernel1_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; if ( ind < m ) { A += ind; magmaFloatComplex res = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < n; j++) { res += A[j*lda] * x[j*incx]; } y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This implementation loads BLK_Y elements into sx, then multiplies * BLK_Y columns of A*sx. */ __global__ void cgemvn_kernel2_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; // threads past last row redundantly work on last row A += min( ind, m-1 ); x += threadIdx.x*incx; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex sx[BLK_Y]; // full block-columns int nfull = (n / BLK_Y) * BLK_Y; for( int j=0; j < nfull; j += BLK_Y ) { // load BLK_Y elements of x into sx sx[threadIdx.x] = x[0]; x += BLK_Y*incx; __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < BLK_Y; j2++) { res += A[0] * sx[j2]; A += lda; } __syncthreads(); } // last, partial block-column // load remaining npart elements of x into sx int npart = n % BLK_Y; if ( threadIdx.x < npart ) { sx[threadIdx.x] = x[0]; } else { sx[threadIdx.x] = MAGMA_C_ZERO; } __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < npart; j2++) { res += A[0]*sx[j2]; A += lda; } if ( ind < m ) { y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^T * x + beta*y. * Each thread block does one column of A (i.e., one row of A^T). * Each thread does a partial sum, then collectively they do a reduction. 
*/ __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += A[i] * x[tx + i]; } if ( tx + mfull < m ) { res += A[mfull] * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^H * x + beta*y. * Same as cgemvt_kernel_fermi but conjugates entries of A. */ __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += conj(A[i]) * x[tx + i]; } if ( tx + mfull < m ) { res += conj(A[mfull]) * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. 
@ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { dim3 grid( (m - 1)/BLK_X + 1 ); dim3 threads( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvn_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvt_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaConjTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvc_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } }
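The transposed-path kernels above (cgemvt_kernel_fermi, cgemvc_kernel_fermi) leave one partial sum per thread in sdata and then call magma_sum_reduce< BLK_X >( tx, sdata ), described in the comments as a tree reduction down to sdata[0]. MAGMA's actual implementation lives in magma_templates.h and is not shown in this file; the following is only a generic sketch of that reduction idiom for a power-of-two block size such as BLK_X = 128.

// Generic shared-memory tree reduction sketch (not MAGMA's magma_sum_reduce):
// assumes NB is a power of two and the block has at least NB threads.
template <int NB, typename T>
__device__ void sum_reduce_sketch(int tid, T* sdata)
{
    __syncthreads();
#pragma unroll
    for (int s = NB / 2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];   // fold the upper half onto the lower half
        }
        __syncthreads();
    }
    // sdata[0] now holds the sum of all NB partial values.
}

After the reduction, thread 0 alone writes y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy], exactly as in the kernels above.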
ffae6ebb5cabcdab4726e81b6f68eb8276c9d32f.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @author Mark Gates @generated from zgemv_fermi.cu normal z -> c, Fri Jan 30 19:00:10 2015 */ #include "common_magma.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #define BLK_X 128 #define BLK_Y 128 /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This simple implementation loads x directly, relying on the cache, * without using shared memory. */ __global__ void cgemvn_kernel1_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; if ( ind < m ) { A += ind; magmaFloatComplex res = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < n; j++) { res += A[j*lda] * x[j*incx]; } y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This implementation loads BLK_Y elements into sx, then multiplies * BLK_Y columns of A*sx. */ __global__ void cgemvn_kernel2_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; // threads past last row redundantly work on last row A += min( ind, m-1 ); x += threadIdx.x*incx; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex sx[BLK_Y]; // full block-columns int nfull = (n / BLK_Y) * BLK_Y; for( int j=0; j < nfull; j += BLK_Y ) { // load BLK_Y elements of x into sx sx[threadIdx.x] = x[0]; x += BLK_Y*incx; __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < BLK_Y; j2++) { res += A[0] * sx[j2]; A += lda; } __syncthreads(); } // last, partial block-column // load remaining npart elements of x into sx int npart = n % BLK_Y; if ( threadIdx.x < npart ) { sx[threadIdx.x] = x[0]; } else { sx[threadIdx.x] = MAGMA_C_ZERO; } __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < npart; j2++) { res += A[0]*sx[j2]; A += lda; } if ( ind < m ) { y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^T * x + beta*y. * Each thread block does one column of A (i.e., one row of A^T). * Each thread does a partial sum, then collectively they do a reduction. */ __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += A[i] * x[tx + i]; } if ( tx + mfull < m ) { res += A[mfull] * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... 
to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^H * x + beta*y. * Same as cgemvt_kernel_fermi but conjugates entries of A. */ __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += conj(A[i]) * x[tx + i]; } if ( tx + mfull < m ) { res += conj(A[mfull]) * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. 
#if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { dim3 grid( (m - 1)/BLK_X + 1 ); dim3 threads( BLK_X, 1, 1 ); cgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); cgemvt_kernel_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaConjTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); cgemvc_kernel_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } }
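This .cu entry and the .hip entry above differ mainly in the hipify banner, the hip/hip_runtime.h include, and the kernel-launch syntax; the computation is identical. For reference, the NoTrans launch as it appears in each file, juxtaposed:

// CUDA form (from the .cu entry above):
cgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>>
    ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );

// HIP form (from the .hip entry above), as emitted by hipify:
hipLaunchKernelGGL(( cgemvn_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream ,
    m, n, alpha, dA, ldda, dx, incx, beta, dy, incy );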
0241e863940df3ae6f3d368868f6137dcd297fb7.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 1.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos1; glm::vec3 *dev_pos2; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridResolution; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to hipFree in Boids::endSimulation. hipMalloc((void**)&dev_pos1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos1 failed!"); hipMalloc((void**)&dev_pos2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos2 failed!"); hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!"); hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects, dev_pos1, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridResolution = 2 * halfSideCount; gridCellCount = gridResolution * gridResolution * gridResolution; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!"); hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!"); hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!"); hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!"); hipDeviceSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos1, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { float count1 = 0.f; float count3 = 0.f; glm::vec3 spos = pos[iSelf]; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < N; ++i) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (i != iSelf) { glm::vec3 bpos = pos[i]; float dist = glm::distance(spos, bpos); if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - spos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel[i]; count3++; } } } v1 = (count1 > 0) ? v1 / count1 - spos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); return v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // Compute a new velocity based on pos and vel1 glm::vec3 vel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. Question: why NOT vel1? read / write vel2[index] = vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? 
scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } __global__ void kernUpdateCoherentPos(int N, float dt, glm::vec3 *in, glm::vec3 *out, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = in[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; out[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(z) // for(y) // for(x) __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; //printf("%f, %f, %f \n", pos[index].x, pos[index].y, pos[index].z); // TODO-2.1 // - Label each boid with the index of its grid cell (dev_particleGridIndices) glm::ivec3 boid_pos = (pos[index] - gridMin) * inverseCellWidth; gridIndices[index] = gridIndex3Dto1D(boid_pos.x, boid_pos.y, boid_pos.z, gridRes); // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 (dev_particleArrayIndices) indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start and end point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; int curr = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[curr] = index; return; } int prev = particleGridIndices[index - 1]; if (prev != curr) { gridCellStartIndices[curr] = index; gridCellEndIndices[prev] = index; // not inclusive } if (index == N - 1) { gridCellEndIndices[curr] = index + 1; // not inclusive return; } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // - Identify the grid cell that this particle is in int partIdx = particleArrayIndices[index]; glm::vec3 ppos = pos[partIdx]; glm::ivec3 partIdx3D = (ppos - gridMin) * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. int neighbors[8]; //int mask[8]; int numNeighbors = 0; int nextZ = (ppos.z - cellWidth * partIdx3D.z - gridMin.z < cellWidth/2) ? -1 : 1; for (int k = 0; k < 2; k ++) { int z = partIdx3D.z + nextZ*k; if (z < 0 && z >= gridRes) break; int nextY = (ppos.y - cellWidth * partIdx3D.y - gridMin.y < cellWidth / 2) ? -1 : 1; for (int j = 0; j < 2; j ++) { int y = partIdx3D.y + nextY*j; if (y < 0 && y >= gridRes) break; int nextX = (ppos.x - cellWidth * partIdx3D.x - gridMin.x < cellWidth / 2) ? -1 : 1; for (int i = 0; i < 2; i ++) { int x = partIdx3D.x + nextX*i; if (x < 0 && x >= gridRes) break; neighbors[numNeighbors] = gridIndex3Dto1D(x, y, z, gridRes); numNeighbors++; } } } float count1 = 0.f; float count3 = 0.f; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < numNeighbors; ++i) { // - For each cell, read the start/end indices in the boid pointer array. int startIdx = gridCellStartIndices[neighbors[i]]; int endIdx = gridCellEndIndices[neighbors[i]]; // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int j = startIdx; j < endIdx; ++j) { if (j != index) { int boidIdx = particleArrayIndices[j]; glm::vec3 bpos = pos[boidIdx]; float dist = glm::distance(ppos, bpos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - ppos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel1[boidIdx]; count3++; } } } } v1 = (count1 > 0) ? v1 / count1 - ppos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); glm::vec3 vel = vel1[partIdx] + v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. 
vel2[partIdx] = vel; } __global__ void kernShuffleV(int N, int *particleArrayIndices, glm::vec3 *new_arr, glm::vec3 *old_arr) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; new_arr[index] = old_arr[particleArrayIndices[index]]; } __global__ void kernShuffleP(int N, int *particleArrayIndices, glm::vec3 *new_arr, glm::vec3 *old_arr) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; new_arr[index] = old_arr[particleArrayIndices[index]]; //printf("%d\n", particleArrayIndices[index]); //printf("%f, %f, %f \n", old_arr[index].x, old_arr[index].y, old_arr[index].z); } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // - Identify the grid cell that this particle is in glm::vec3 ppos = pos[index]; glm::ivec3 partIdx3D = (ppos - gridMin) * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. int neighbors[8]; int numNeighbors = 0; int nextZ = (ppos.z - cellWidth * partIdx3D.z - gridMin.z < cellWidth/2) ? -1 : 1; for (int k = 0; k < 2; k++) { int z = partIdx3D.z + nextZ*k; if (z < 0 && z >= gridRes) break; int nextY = (ppos.y - cellWidth * partIdx3D.y - gridMin.y < cellWidth/ 2) ? -1 : 1; for (int j = 0; j < 2; j++) { int y = partIdx3D.y + nextY*j; if (y < 0 && y >= gridRes) break; int nextX = (ppos.x - cellWidth * partIdx3D.x - gridMin.x < cellWidth / 2) ? -1 : 1; for (int i = 0; i < 2; i++) { int x = partIdx3D.x + nextX*i; if (x < 0 && x >= gridRes) break; neighbors[numNeighbors] = gridIndex3Dto1D(x, y, z, gridRes); numNeighbors++; } } } float count1 = 0.f; float count3 = 0.f; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < numNeighbors; ++i) { // - For each cell, read the start/end indices in the boid pointer array. int startIdx = gridCellStartIndices[neighbors[i]]; int endIdx = gridCellEndIndices[neighbors[i]]; // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int boidIdx = startIdx; boidIdx < endIdx; ++boidIdx) { if (boidIdx != index) { glm::vec3 bpos = pos[boidIdx]; float dist = glm::distance(ppos, bpos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - ppos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel1[boidIdx]; count3++; } } } } v1 = (count1 > 0) ? v1 / count1 - ppos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); glm::vec3 vel = vel1[index] + v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. 
vel2[index] = vel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. kernUpdateVelocityBruteForce << <blocksPerGrid, blockSize >> >(numObjects, dev_pos1, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!"); kernUpdatePos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos1, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // TODO-1.2 ping-pong the velocity buffers (read/write) std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 blocksPerGridCell((gridCellCount + blockSize - 1) / blockSize); // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, dev_pos1, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_grid(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_particles(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_grid, dev_thrust_grid + numObjects, dev_thrust_particles); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer failed!"); kernIdentifyCellStartEnd << <blocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos1, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions kernUpdatePos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos1, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 blocksPerGridCell((gridCellCount + blockSize - 1) / blockSize); // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, dev_pos1, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. 
A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_grid(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_particles(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_grid, dev_thrust_grid + numObjects, dev_thrust_particles); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer failed!"); kernIdentifyCellStartEnd << <blocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); //// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all //// the particle data in the simulation array. //// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED. kernShuffleV << <blocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_vel2, dev_vel1); // out, in kernShuffleP << <blocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_pos2, dev_pos1); checkCUDAErrorWithLine("kernShuffle failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchCoherent << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!"); // - Update positions into pos1 kernUpdateCoherentPos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos2, dev_pos1, dev_vel1); // in, out checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { hipFree(dev_vel1); hipFree(dev_vel2); hipFree(dev_pos1); // TODO-2.1 TODO-2.3 - Free any additional buffers here. hipFree(dev_gridCellStartIndices); hipFree(dev_gridCellEndIndices); hipFree(dev_particleArrayIndices); hipFree(dev_particleGridIndices); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; hipMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!"); hipMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("hipMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice); hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost); hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; hipFree(dev_intKeys); hipFree(dev_intValues); checkCUDAErrorWithLine("hipFree failed!"); return; }
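The grid-based kernels in this file map each boid to a cell with gridIndex3Dto1D and then visit up to eight candidate neighbor cells. Two details worth making explicit: with the constants in this file (scene_scale = 100 and gridCellWidth = 2 * max(rule distances) = 10), initSimulation yields halfSideCount = 11, gridResolution = 22, and gridCellCount = 22*22*22 = 10648; and the neighbor-loop range checks are written as `z < 0 && z >= gridRes`, a condition that can never hold, so out-of-range cells are not actually rejected. The sketch below restates the index math with an or-based bounds check; it is an editorial sketch, not code from the file.

// Sketch (not from the file): cell indexing used by the uniform-grid kernels,
// with an ||-style bounds check so out-of-range neighbor cells can be skipped.
__host__ __device__ inline int gridIndex3Dto1D_sketch(int x, int y, int z, int gridResolution) {
  return x + y * gridResolution + z * gridResolution * gridResolution;
}

__host__ __device__ inline bool cellInGrid_sketch(int x, int y, int z, int gridResolution) {
  return x >= 0 && x < gridResolution &&
         y >= 0 && y < gridResolution &&
         z >= 0 && z < gridResolution;
}

// In kernComputeIndices the 3D cell of a boid is obtained as
//   glm::ivec3 cell3D = (pos[index] - gridMin) * inverseCellWidth;
// and flattened with gridIndex3Dto1D(cell3D.x, cell3D.y, cell3D.z, gridRes).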
0241e863940df3ae6f3d368868f6137dcd297fb7.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" // LOOK-2.1 potentially useful for doing grid-based neighbor search #ifndef imax #define imax( a, b ) ( ((a) > (b)) ? (a) : (b) ) #endif #ifndef imin #define imin( a, b ) ( ((a) < (b)) ? (a) : (b) ) #endif #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 // LOOK-1.2 Parameters for the boids algorithm. // These worked well in our reference implementation. #define rule1Distance 5.0f #define rule2Distance 1.0f #define rule3Distance 5.0f #define rule1Scale 0.01f #define rule2Scale 0.1f #define rule3Scale 0.1f #define maxSpeed 1.0f /*! Size of the starting area in simulation space. */ #define scene_scale 100.0f /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); // LOOK-1.2 - These buffers are here to hold all your boid information. // These get allocated for you in Boids::initSimulation. // Consider why you would need two velocity buffers in a simulation where each // boid cares about its neighbors' velocities. // These are called ping-pong buffers. glm::vec3 *dev_pos1; glm::vec3 *dev_pos2; glm::vec3 *dev_vel1; glm::vec3 *dev_vel2; // LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust // pointers on your own too. // For efficient sorting and the uniform grid. These should always be parallel. int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle? int *dev_particleGridIndices; // What grid cell is this particle in? // needed for use with thrust thrust::device_ptr<int> dev_thrust_particleArrayIndices; thrust::device_ptr<int> dev_thrust_particleGridIndices; int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs int *dev_gridCellEndIndices; // to this cell? // TODO-2.3 - consider what additional buffers you might need to reshuffle // the position and velocity data to be coherent within cells. // LOOK-2.1 - Grid parameters based on simulation parameters. // These are automatically computed for you in Boids::initSimulation int gridCellCount; int gridResolution; float gridCellWidth; float gridInverseCellWidth; glm::vec3 gridMinimum; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * LOOK-1.2 - this is a typical helper function for a CUDA kernel. * Function for generating a random vec3. 
*/ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * LOOK-1.2 - This is a basic CUDA kernel. * CUDA kernel for generating boids with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = scale * rand.z; } } /** * Initialize memory, update some globals */ void Boids::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); // LOOK-1.2 - This is basic CUDA memory management and error checking. // Don't forget to cudaFree in Boids::endSimulation. cudaMalloc((void**)&dev_pos1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos1 failed!"); cudaMalloc((void**)&dev_pos2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos2 failed!"); cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!"); cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!"); // LOOK-1.2 - This is a typical CUDA kernel invocation. kernGenerateRandomPosArray << <fullBlocksPerGrid, blockSize >> >(1, numObjects, dev_pos1, scene_scale); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); // LOOK-2.1 computing grid params gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance); int halfSideCount = (int)(scene_scale / gridCellWidth) + 1; gridResolution = 2 * halfSideCount; gridCellCount = gridResolution * gridResolution * gridResolution; gridInverseCellWidth = 1.0f / gridCellWidth; float halfGridWidth = gridCellWidth * halfSideCount; gridMinimum.x -= halfGridWidth; gridMinimum.y -= halfGridWidth; gridMinimum.z -= halfGridWidth; // TODO-2.1 TODO-2.3 - Allocate additional buffers here. cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!"); cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!"); cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!"); cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!"); cudaThreadSynchronize(); } /****************** * copyBoidsToVBO * ******************/ /** * Copy the boid positions into the VBO so that they can be drawn by OpenGL. 
*/ __global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1.0f; } } __global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index < N) { vbo[4 * index + 0] = vel[index].x + 0.3f; vbo[4 * index + 1] = vel[index].y + 0.3f; vbo[4 * index + 2] = vel[index].z + 0.3f; vbo[4 * index + 3] = 1.0f; } } /** * Wrapper for call to the kernCopyboidsToVBO CUDA kernel. */ void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) { dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos1, vbodptr_positions, scene_scale); kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale); checkCUDAErrorWithLine("copyBoidsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce. * __device__ code can be called from a __global__ context * Compute the new velocity on the body with index `iSelf` due to the `N` boids * in the `pos` and `vel` arrays. */ __device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) { float count1 = 0.f; float count3 = 0.f; glm::vec3 spos = pos[iSelf]; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < N; ++i) { // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (i != iSelf) { glm::vec3 bpos = pos[i]; float dist = glm::distance(spos, bpos); if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - spos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel[i]; count3++; } } } v1 = (count1 > 0) ? v1 / count1 - spos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); return v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; } /** * TODO-1.2 implement basic flocking * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // Compute a new velocity based on pos and vel1 glm::vec3 vel = vel1[index] + computeVelocityChange(N, index, pos, vel1); // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. Question: why NOT vel1? read / write vel2[index] = vel; } /** * LOOK-1.2 Since this is pretty trivial, we implemented it for you. * For each of the `N` bodies, update its position based on its current velocity. */ __global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = pos[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? 
scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; pos[index] = thisPos; } __global__ void kernUpdateCoherentPos(int N, float dt, glm::vec3 *in, glm::vec3 *out, glm::vec3 *vel) { // Update position by velocity int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= N) { return; } glm::vec3 thisPos = in[index]; thisPos += vel[index] * dt; // Wrap the boids around so we don't lose them thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x; thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y; thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z; thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x; thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y; thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z; out[index] = thisPos; } // LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index. // LOOK-2.3 Looking at this method, what would be the most memory efficient // order for iterating over neighboring grid cells? // for(z) // for(y) // for(x) __device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) { return x + y * gridResolution + z * gridResolution * gridResolution; } __global__ void kernComputeIndices(int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, glm::vec3 *pos, int *indices, int *gridIndices) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; //printf("%f, %f, %f \n", pos[index].x, pos[index].y, pos[index].z); // TODO-2.1 // - Label each boid with the index of its grid cell (dev_particleGridIndices) glm::ivec3 boid_pos = (pos[index] - gridMin) * inverseCellWidth; gridIndices[index] = gridIndex3Dto1D(boid_pos.x, boid_pos.y, boid_pos.z, gridRes); // - Set up a parallel array of integer indices as pointers to the actual // boid data in pos and vel1/vel2 (dev_particleArrayIndices) indices[index] = index; } // LOOK-2.1 Consider how this could be useful for indicating that a cell // does not enclose any boids __global__ void kernResetIntBuffer(int N, int *intBuffer, int value) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { intBuffer[index] = value; } } __global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices, int *gridCellStartIndices, int *gridCellEndIndices) { // TODO-2.1 // Identify the start and end point of each cell in the gridIndices array. // This is basically a parallel unrolling of a loop that goes // "this index doesn't match the one before it, must be a new cell!" 
int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; int curr = particleGridIndices[index]; if (index == 0) { gridCellStartIndices[curr] = index; return; } int prev = particleGridIndices[index - 1]; if (prev != curr) { gridCellStartIndices[curr] = index; gridCellEndIndices[prev] = index; // not inclusive } if (index == N - 1) { gridCellEndIndices[curr] = index + 1; // not inclusive return; } } __global__ void kernUpdateVelNeighborSearchScattered( int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, int *particleArrayIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.1 - Update a boid's velocity using the uniform grid to reduce // the number of boids that need to be checked. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // - Identify the grid cell that this particle is in int partIdx = particleArrayIndices[index]; glm::vec3 ppos = pos[partIdx]; glm::ivec3 partIdx3D = (ppos - gridMin) * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. int neighbors[8]; //int mask[8]; int numNeighbors = 0; int nextZ = (ppos.z - cellWidth * partIdx3D.z - gridMin.z < cellWidth/2) ? -1 : 1; for (int k = 0; k < 2; k ++) { int z = partIdx3D.z + nextZ*k; if (z < 0 && z >= gridRes) break; int nextY = (ppos.y - cellWidth * partIdx3D.y - gridMin.y < cellWidth / 2) ? -1 : 1; for (int j = 0; j < 2; j ++) { int y = partIdx3D.y + nextY*j; if (y < 0 && y >= gridRes) break; int nextX = (ppos.x - cellWidth * partIdx3D.x - gridMin.x < cellWidth / 2) ? -1 : 1; for (int i = 0; i < 2; i ++) { int x = partIdx3D.x + nextX*i; if (x < 0 && x >= gridRes) break; neighbors[numNeighbors] = gridIndex3Dto1D(x, y, z, gridRes); numNeighbors++; } } } float count1 = 0.f; float count3 = 0.f; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < numNeighbors; ++i) { // - For each cell, read the start/end indices in the boid pointer array. int startIdx = gridCellStartIndices[neighbors[i]]; int endIdx = gridCellEndIndices[neighbors[i]]; // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int j = startIdx; j < endIdx; ++j) { if (j != index) { int boidIdx = particleArrayIndices[j]; glm::vec3 bpos = pos[boidIdx]; float dist = glm::distance(ppos, bpos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - ppos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel1[boidIdx]; count3++; } } } } v1 = (count1 > 0) ? v1 / count1 - ppos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); glm::vec3 vel = vel1[partIdx] + v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. 
vel2[partIdx] = vel; } __global__ void kernShuffleV(int N, int *particleArrayIndices, glm::vec3 *new_arr, glm::vec3 *old_arr) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; new_arr[index] = old_arr[particleArrayIndices[index]]; } __global__ void kernShuffleP(int N, int *particleArrayIndices, glm::vec3 *new_arr, glm::vec3 *old_arr) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; new_arr[index] = old_arr[particleArrayIndices[index]]; //printf("%d\n", particleArrayIndices[index]); //printf("%f, %f, %f \n", old_arr[index].x, old_arr[index].y, old_arr[index].z); } __global__ void kernUpdateVelNeighborSearchCoherent( int N, int gridRes, glm::vec3 gridMin, float inverseCellWidth, float cellWidth, int *gridCellStartIndices, int *gridCellEndIndices, glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) { // TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered, // except with one less level of indirection. // This should expect gridCellStartIndices and gridCellEndIndices to refer // directly to pos and vel1. int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; // - Identify the grid cell that this particle is in glm::vec3 ppos = pos[index]; glm::ivec3 partIdx3D = (ppos - gridMin) * inverseCellWidth; // - Identify which cells may contain neighbors. This isn't always 8. int neighbors[8]; int numNeighbors = 0; int nextZ = (ppos.z - cellWidth * partIdx3D.z - gridMin.z < cellWidth/2) ? -1 : 1; for (int k = 0; k < 2; k++) { int z = partIdx3D.z + nextZ*k; if (z < 0 && z >= gridRes) break; int nextY = (ppos.y - cellWidth * partIdx3D.y - gridMin.y < cellWidth/ 2) ? -1 : 1; for (int j = 0; j < 2; j++) { int y = partIdx3D.y + nextY*j; if (y < 0 && y >= gridRes) break; int nextX = (ppos.x - cellWidth * partIdx3D.x - gridMin.x < cellWidth / 2) ? -1 : 1; for (int i = 0; i < 2; i++) { int x = partIdx3D.x + nextX*i; if (x < 0 && x >= gridRes) break; neighbors[numNeighbors] = gridIndex3Dto1D(x, y, z, gridRes); numNeighbors++; } } } float count1 = 0.f; float count3 = 0.f; glm::vec3 v1(0.f), v2(0.f), v3(0.f); for (int i = 0; i < numNeighbors; ++i) { // - For each cell, read the start/end indices in the boid pointer array. int startIdx = gridCellStartIndices[neighbors[i]]; int endIdx = gridCellEndIndices[neighbors[i]]; // DIFFERENCE: For best results, consider what order the cells should be // checked in to maximize the memory benefits of reordering the boids data. // - Access each boid in the cell and compute velocity change from // the boids rules, if this boid is within the neighborhood distance. for (int boidIdx = startIdx; boidIdx < endIdx; ++boidIdx) { if (boidIdx != index) { glm::vec3 bpos = pos[boidIdx]; float dist = glm::distance(ppos, bpos); // Rule 1: boids fly towards their local perceived center of mass, which excludes themselves if (dist < rule1Distance) { v1 += bpos; count1++; } // Rule 2: boids try to stay a distance d away from each other if (dist < rule2Distance) { v2 -= (bpos - ppos); } // Rule 3: boids try to match the speed of surrounding boids if (dist < rule3Distance) { v3 += vel1[boidIdx]; count3++; } } } } v1 = (count1 > 0) ? v1 / count1 - ppos : glm::vec3(0.f); v3 = (count3 > 0) ? v3 / count3 : glm::vec3(0.f); glm::vec3 vel = vel1[index] + v1*rule1Scale + v2*rule2Scale + v3*rule3Scale; // Clamp the speed vel = (glm::length(vel) > maxSpeed) ? glm::normalize(vel) * maxSpeed : vel; // Record the new velocity into vel2. 
vel2[index] = vel; } /** * Step the entire N-body simulation by `dt` seconds. */ void Boids::stepSimulationNaive(float dt) { dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); // TODO-1.2 - use the kernels you wrote to step the simulation forward in time. kernUpdateVelocityBruteForce << <blocksPerGrid, blockSize >> >(numObjects, dev_pos1, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelocityBruteForce failed!"); kernUpdatePos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos1, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // TODO-1.2 ping-pong the velocity buffers (read/write) std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationScatteredGrid(float dt) { // TODO-2.1 // Uniform Grid Neighbor search using Thrust sort. dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 blocksPerGridCell((gridCellCount + blockSize - 1) / blockSize); // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, dev_pos1, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_grid(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_particles(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_grid, dev_thrust_grid + numObjects, dev_thrust_particles); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer failed!"); kernIdentifyCellStartEnd << <blocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchScattered << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos1, dev_vel1, dev_vel2); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchScattered failed!"); // - Update positions kernUpdatePos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos1, dev_vel2); checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed std::swap(dev_vel1, dev_vel2); } void Boids::stepSimulationCoherentGrid(float dt) { // TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid // Uniform Grid Neighbor search using Thrust sort on cell-coherent data. dim3 blocksPerGrid((numObjects + blockSize - 1) / blockSize); dim3 blocksPerGridCell((gridCellCount + blockSize - 1) / blockSize); // In Parallel: // - label each particle with its array index as well as its grid index. // Use 2x width grids. kernComputeIndices << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, dev_pos1, dev_particleArrayIndices, dev_particleGridIndices); checkCUDAErrorWithLine("kernComputeIndices failed!"); // - Unstable key sort using Thrust. 
A stable sort isn't necessary, but you // are welcome to do a performance comparison. thrust::device_ptr<int> dev_thrust_grid(dev_particleGridIndices); thrust::device_ptr<int> dev_thrust_particles(dev_particleArrayIndices); thrust::sort_by_key(dev_thrust_grid, dev_thrust_grid + numObjects, dev_thrust_particles); // - Naively unroll the loop for finding the start and end indices of each // cell's data pointers in the array of boid indices kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellStartIndices, -1); kernResetIntBuffer << <blocksPerGridCell, blockSize >> >(gridCellCount, dev_gridCellEndIndices, -1); checkCUDAErrorWithLine("kernResetIntBuffer failed!"); kernIdentifyCellStartEnd << <blocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices); checkCUDAErrorWithLine("kernIdentifyCellStartEnd failed!"); //// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all //// the particle data in the simulation array. //// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED. kernShuffleV << <blocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_vel2, dev_vel1); // out, in kernShuffleP << <blocksPerGrid, blockSize >> >(numObjects, dev_particleArrayIndices, dev_pos2, dev_pos1); checkCUDAErrorWithLine("kernShuffle failed!"); // - Perform velocity updates using neighbor search kernUpdateVelNeighborSearchCoherent << <blocksPerGrid, blockSize >> >(numObjects, gridResolution, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_pos2, dev_vel2, dev_vel1); checkCUDAErrorWithLine("kernUpdateVelNeighborSearchCoherent failed!"); // - Update positions into pos1 kernUpdateCoherentPos << <blocksPerGrid, blockSize >> >(numObjects, dt, dev_pos2, dev_pos1, dev_vel1); // in, out checkCUDAErrorWithLine("kernUpdatePos failed!"); // - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE. } void Boids::endSimulation() { cudaFree(dev_vel1); cudaFree(dev_vel2); cudaFree(dev_pos1); // TODO-2.1 TODO-2.3 - Free any additional buffers here. cudaFree(dev_gridCellStartIndices); cudaFree(dev_gridCellEndIndices); cudaFree(dev_particleArrayIndices); cudaFree(dev_particleGridIndices); } void Boids::unitTest() { // LOOK-1.2 Feel free to write additional tests here. // test unstable sort int *dev_intKeys; int *dev_intValues; int N = 10; int *intKeys = new int[N]; int *intValues = new int[N]; intKeys[0] = 0; intValues[0] = 0; intKeys[1] = 1; intValues[1] = 1; intKeys[2] = 0; intValues[2] = 2; intKeys[3] = 3; intValues[3] = 3; intKeys[4] = 0; intValues[4] = 4; intKeys[5] = 2; intValues[5] = 5; intKeys[6] = 2; intValues[6] = 6; intKeys[7] = 0; intValues[7] = 7; intKeys[8] = 5; intValues[8] = 8; intKeys[9] = 6; intValues[9] = 9; cudaMalloc((void**)&dev_intKeys, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!"); cudaMalloc((void**)&dev_intValues, N * sizeof(int)); checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!"); dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); std::cout << "before unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // How to copy data to the GPU cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice); cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice); // Wrap device vectors in thrust iterators for use with thrust. 
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys); thrust::device_ptr<int> dev_thrust_values(dev_intValues); // LOOK-2.1 Example for using thrust::sort_by_key thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values); // How to copy data back to the CPU side from the GPU cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost); checkCUDAErrorWithLine("memcpy back failed!"); std::cout << "after unstable sort: " << std::endl; for (int i = 0; i < N; i++) { std::cout << " key: " << intKeys[i]; std::cout << " value: " << intValues[i] << std::endl; } // cleanup delete[] intKeys; delete[] intValues; cudaFree(dev_intKeys); cudaFree(dev_intValues); checkCUDAErrorWithLine("cudaFree failed!"); return; }
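// --- Editor's note (hedged sketch, not part of the original kernel.cu above) ---
// The LOOK-2.1 comments and Boids::unitTest above rely on thrust::sort_by_key to
// reorder dev_particleArrayIndices so that boids belonging to the same grid cell
// end up contiguous. The standalone program below is a minimal sketch of that
// pattern on toy data; the names cellKeys/boidIndices are illustrative and do not
// appear in the project code. It is guarded with #if 0 so it does not interfere
// with the surrounding translation unit; compile it separately if desired.
#if 0
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/sort.h>

int main() {
    // Grid-cell index of each boid (keys) and the boid's array index (values).
    int h_cells[8] = { 3, 1, 3, 0, 2, 1, 0, 2 };
    int h_boids[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    thrust::device_vector<int> cellKeys(h_cells, h_cells + 8);
    thrust::device_vector<int> boidIndices(h_boids, h_boids + 8);

    // After this call, boids that share a cell are adjacent, which is what a
    // kernel like kernIdentifyCellStartEnd expects when it scans for boundaries.
    thrust::sort_by_key(cellKeys.begin(), cellKeys.end(), boidIndices.begin());

    for (int i = 0; i < 8; ++i) {
        printf("cell %d -> boid %d\n", (int)cellKeys[i], (int)boidIndices[i]);
    }
    return 0;
}
#endif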
4be2b590dfbe46a9ddba6519e53da62b038652e6.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2020 California Institute of Technology. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Author: Ethan Jaszewski #include "ce.h" #include <algorithm> #include <iostream> #include "hip/hip_runtime.h" #include "math.h" #include "errchk.cuh" // // Simple ConditionalEntropy Function Definitions // ConditionalEntropy::ConditionalEntropy(size_t n_phase, size_t n_mag, size_t p_overlap, size_t m_overlap) { // Just set number of bins num_phase_bins = n_phase; num_mag_bins = n_mag; // Just set the overlap num_phase_overlap = p_overlap; num_mag_overlap = m_overlap; // Calculate bin size accordingly phase_bin_size = 1.0 / static_cast<float>(n_phase); mag_bin_size = 1.0 / static_cast<float>(n_mag); } __host__ __device__ size_t ConditionalEntropy::NumBins() const { return num_phase_bins * num_mag_bins; } __host__ __device__ size_t ConditionalEntropy::NumPhaseBins() const { return num_phase_bins; } __host__ __device__ size_t ConditionalEntropy::NumMagBins() const { return num_mag_bins; } __host__ __device__ size_t ConditionalEntropy::NumPhaseBinOverlap() const { return num_phase_overlap; } __host__ __device__ size_t ConditionalEntropy::NumMagBinOverlap() const { return num_mag_overlap; } __host__ __device__ float ConditionalEntropy::PhaseBinSize() const { return phase_bin_size; } __host__ __device__ float ConditionalEntropy::MagBinSize() const { return mag_bin_size; } __host__ __device__ size_t ConditionalEntropy::PhaseBin(float phase_val) const { return static_cast<size_t>(phase_val / phase_bin_size); } __host__ __device__ size_t ConditionalEntropy::MagBin(float mag_val) const { return static_cast<size_t>(mag_val / mag_bin_size); } __host__ __device__ size_t ConditionalEntropy::BinIndex(size_t phase_bin, size_t mag_bin) const { return phase_bin * num_mag_bins + mag_bin; } // // CUDA Kernels // /** * Folds and bins the input data across all trial periods and time derivatives. * * This kernel takes in a time-series of paired times and magnitudes, folding * the times according to the given trial periods and time derivatives, * outputting a series of histograms into global memory. * * Each block computes a histogram of the full data series for a given period * and period time derivative. As such, the x-dimension of the grid should match * the number of trial periods, and the y-dimension of the grid should match the * number of trial period time derivatives. * * Internally, the kernel uses shared memory atomics with a 32-bit integer based * histogram, which requires a total of 4 * Histogram Size bytes of shared * memory. Due to the use of shared atomics, this kernel will perform poorly on * pre-Maxwell GPUs. * * Note: All arrays must be device-allocated * * @param times light curve datapoint times * @param mags light curve datapoint magnitudes * @param periods list of trial periods * @param period_dts list of trial period time derivatives * @param h_params histogram parameters * @param hists array of output histograms */ __global__ void FoldBinKernel(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const ConditionalEntropy h_params, float* hists) { // Histogram which this block will produce. const size_t block_id = blockIdx.x * gridDim.y + blockIdx.y; float* block_hist = &hists[h_params.NumBins() * block_id]; // Period and period time derivative for this block. 
const float period = periods[blockIdx.x]; const float period_dt = period_dts[blockIdx.y]; // Time derivative correction factor. const float pdt_corr = (period_dt / period) / 2; // Shared memory histogram for this thread. extern __shared__ uint32_t sh_hist[]; // Zero the shared memory for this block for (size_t i = threadIdx.x; i < h_params.NumBins(); i += blockDim.x) { sh_hist[i] = 0; } __syncthreads(); float i_part; // Only used for modff. // Accumulate into this thread's histogram (as many points as needed), // simultaneously computing the folded time value for (size_t idx = threadIdx.x; idx < length; idx += blockDim.x) { float t = times[idx]; float t_corr = t - pdt_corr * t * t; float folded = fabsf(modff(t_corr / period, &i_part)); size_t phase_bin = h_params.PhaseBin(folded); size_t mag_bin = h_params.MagBin(mags[idx]); for (size_t i = 0; i < h_params.NumPhaseBinOverlap(); i++) { for (size_t j = 0; j < h_params.NumMagBinOverlap(); j++) { size_t idx = h_params.BinIndex((phase_bin + i) % h_params.NumPhaseBins(), (mag_bin + j) % h_params.NumMagBins()); atomicAdd(&sh_hist[idx], 1); } } } __syncthreads(); size_t div = length * h_params.NumPhaseBinOverlap() * h_params.NumMagBinOverlap(); // Copy the block's histogram into global memory for (size_t i = threadIdx.x; i < h_params.NumBins(); i += blockDim.x) { block_hist[i] = static_cast<float>(sh_hist[i]) / static_cast<float>(div); } } /** * Computes the conditional entropy for the input histograms. * * This kernel takes in an arbitrarily long list of histograms with a given set * of parameters and computes the conditional entropy for each histogram, * outputting a series of values into an array. * * Internally, each thread is responsible for first computing the conditional * entropy of one phase bin of the input (disregarding histogram boundaries), * then the values for each thread are accumulated directly into global memory * to avoid potential inter-block conflicts. The computation uses shared memory * equal to 4 * Number of Threads bytes. * * Note: All arrays must be device-allocated * * @param hists array of input histograms * @param num_hists number of histograms * @param h_params histogram parameters * @param ce_vals output array of conditional entropy values */ __global__ void ConditionalEntropyKernel(const float* hists, const size_t num_hists, const ConditionalEntropy h_params, float* ce_vals) { // Shared memory scratch space extern __shared__ float scratch[]; // Which histogram row this thread is summing size_t idx = blockIdx.x * blockDim.x + threadIdx.x; // Don't compute for out-of-bounds histograms if (idx / h_params.NumPhaseBins() >= num_hists) { return; } // Which shared memory location this thread uses size_t tid = threadIdx.x; // Index in the histogram array corresponding to the start of this row const size_t offset = idx * h_params.NumMagBins(); // Accumulate into shared memory (compute p(phi_j)) scratch[tid] = 0; for (size_t i = 0; i < h_params.NumMagBins(); i++) { scratch[tid] += hists[i + offset]; } // Compute per-phase-bin conditional entropy // TODO: remove use of global mem? float p_j = scratch[tid]; // Store p_j scratch[tid] = 0; // Reset shmem before summing for (size_t i = 0; i < h_params.NumMagBins(); i++) { float p_ij = hists[i + offset]; if (p_ij != 0) { scratch[tid] += p_ij * logf(p_j / p_ij); } } // Accumulate per-phase-bin conditional entropy into total conditional // entropy for the histogram. // TODO: Replace with shared memory reduction of some kind. 
*yikes* size_t ce_idx = idx / h_params.NumPhaseBins(); atomicAdd(&ce_vals[ce_idx], scratch[tid]); } // // Wrapper Functions // float* ConditionalEntropy::DeviceFoldAndBin(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of global memory required to store output size_t bytes = NumBins() * sizeof(float) * num_periods * num_p_dts; // Allocate and zero global memory for output histograms float* dev_hists; gpuErrchk(hipMalloc(&dev_hists, bytes)); gpuErrchk(hipMemset(dev_hists, 0, bytes)); // Number of threads and corresponding shared memory usage const size_t num_threads = 512; const size_t shared_bytes = NumBins() * sizeof(uint32_t); // Grid to search over periods and time derivatives const dim3 grid_dim = dim3(num_periods, num_p_dts); // NOTE: A ConditionalEntropy object is small enough that we can pass it in // the registers by dereferencing it. hipLaunchKernelGGL(( FoldBinKernel), dim3(grid_dim), dim3(num_threads), shared_bytes, 0, times, mags, length, periods, period_dts, *this, dev_hists); return dev_hists; } float* ConditionalEntropy::FoldAndBin(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of input data const size_t data_bytes = length * sizeof(float); // Allocate device pointers float* dev_times; float* dev_mags; float* dev_periods; float* dev_period_dts; gpuErrchk(hipMalloc(&dev_times, data_bytes)); gpuErrchk(hipMalloc(&dev_mags, data_bytes)); gpuErrchk(hipMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(hipMalloc(&dev_period_dts, num_p_dts * sizeof(float))); // Copy data to device memory gpuErrchk(hipMemcpy(dev_times, times, data_bytes, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_mags, mags, data_bytes, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_periods, periods, num_periods * sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_periods, period_dts, num_p_dts * sizeof(float), hipMemcpyHostToDevice)); float* dev_hists = DeviceFoldAndBin(dev_times, dev_mags, length, dev_periods, dev_period_dts, num_periods, num_p_dts); // Allocate host histograms and copy from device size_t bytes = NumBins() * num_periods * num_p_dts * sizeof(float); float* hists = (float*)malloc(bytes); gpuErrchk(hipMemcpy(hists, dev_hists, bytes, hipMemcpyDeviceToHost)); // Free GPU memory gpuErrchk(hipFree(dev_times)); gpuErrchk(hipFree(dev_mags)); gpuErrchk(hipFree(dev_periods)); gpuErrchk(hipFree(dev_period_dts)); gpuErrchk(hipFree(dev_hists)); return hists; } float* ConditionalEntropy::DeviceCalcCEFromHists(const float* hists, const size_t num_hists) const { // Allocate global memory for output conditional entropy values float* dev_ces; gpuErrchk(hipMalloc(&dev_ces, num_hists * sizeof(float))); const size_t n_t = 512; const size_t n_b = ((num_hists * NumPhaseBins()) / n_t) + 1; // NOTE: A ConditionalEntropy object is small enough that we can pass it in // the registers by dereferencing it. 
hipLaunchKernelGGL(( ConditionalEntropyKernel), dim3(n_b), dim3(n_t), n_t * sizeof(float), 0, hists, num_hists, *this, dev_ces); return dev_ces; } float* ConditionalEntropy::CalcCEFromHists(const float* hists, const size_t num_hists) const { // Number of bytes in the histogram const size_t bytes = num_hists * NumBins() * sizeof(float); // Allocate device memory for histograms and copy over float* dev_hists; gpuErrchk(hipMalloc(&dev_hists, bytes)); gpuErrchk(hipMemcpy(dev_hists, hists, bytes, hipMemcpyHostToDevice)); float* dev_ces = DeviceCalcCEFromHists(dev_hists, num_hists); // Copy CEs to host float* ces = (float*)malloc(num_hists * sizeof(float)); gpuErrchk(hipMemcpy(ces, dev_ces, num_hists * sizeof(float), hipMemcpyDeviceToHost)); // Free GPU memory gpuErrchk(hipFree(dev_hists)); gpuErrchk(hipFree(dev_ces)); return ces; } float* ConditionalEntropy::CalcCEVals(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of input data const size_t data_bytes = length * sizeof(float); const size_t num_hists = num_periods * num_p_dts; // Allocate device pointers float* dev_times; float* dev_mags; float* dev_periods; float* dev_period_dts; gpuErrchk(hipMalloc(&dev_times, data_bytes)); gpuErrchk(hipMalloc(&dev_mags, data_bytes)); gpuErrchk(hipMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(hipMalloc(&dev_period_dts, num_p_dts * sizeof(float))); // Copy data to device memory gpuErrchk(hipMemcpy(dev_times, times, data_bytes, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_mags, mags, data_bytes, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_periods, periods, num_periods * sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float), hipMemcpyHostToDevice)); float* dev_hists = DeviceFoldAndBin(dev_times, dev_mags, length, dev_periods, dev_period_dts, num_periods, num_p_dts); float* dev_ces = DeviceCalcCEFromHists(dev_hists, num_hists); // Copy CEs to host float* ces = (float*)malloc(num_hists * sizeof(float)); gpuErrchk(hipMemcpy(ces, dev_ces, num_hists * sizeof(float), hipMemcpyDeviceToHost)); // Free intermediate and output values gpuErrchk(hipFree(dev_hists)); gpuErrchk(hipFree(dev_ces)); // Free GPU inputs gpuErrchk(hipFree(dev_times)); gpuErrchk(hipFree(dev_mags)); gpuErrchk(hipFree(dev_periods)); gpuErrchk(hipFree(dev_period_dts)); return ces; } float* ConditionalEntropy::CalcCEValsBatched(const std::vector<float*>& times, const std::vector<float*>& mags, const std::vector<size_t>& lengths, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // TODO: Use async memory transferring // TODO: Look at ways of batching data transfer. // Size of one CE out array, and total CE output size. size_t ce_out_size = num_periods * num_p_dts * sizeof(float); size_t ce_size_total = ce_out_size * lengths.size(); // Allocate the output CE array so we can copy to it. 
float* ce_host = (float*)malloc(ce_size_total); // Copy trial information over float* dev_periods; float* dev_period_dts; gpuErrchk(hipMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(hipMalloc(&dev_period_dts, num_p_dts * sizeof(float))); gpuErrchk(hipMemcpy(dev_periods, periods, num_periods * sizeof(float), hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float), hipMemcpyHostToDevice)); // Intermediate histogram memory size_t num_hists = num_periods * num_p_dts; size_t hist_bytes = NumBins() * sizeof(float) * num_hists; float* dev_hists; gpuErrchk(hipMalloc(&dev_hists, hist_bytes)); // Intermediate conditional entropy memory float* dev_ces; gpuErrchk(hipMalloc(&dev_ces, ce_out_size)); // Kernel launch information for the fold & bin step const size_t num_threads_fb = 256; const size_t shared_bytes_fb = NumBins() * sizeof(uint32_t); const dim3 grid_dim_fb = dim3(num_periods, num_p_dts); // Kernel launch information for the ce calculation step const size_t num_threads_ce = 256; const size_t num_blocks_ce = ((num_hists * NumPhaseBins()) / num_threads_ce) + 1; const size_t shared_bytes_ce = num_threads_ce * sizeof(float); // Buffer size (large enough for longest light curve) auto max_length = std::max_element(lengths.begin(), lengths.end()); const size_t buffer_length = *max_length; const size_t buffer_bytes = sizeof(float) * buffer_length; float* dev_times_buffer; float* dev_mags_buffer; gpuErrchk(hipMalloc(&dev_times_buffer, buffer_bytes)); gpuErrchk(hipMalloc(&dev_mags_buffer, buffer_bytes)); for (size_t i = 0; i < lengths.size(); i++) { // Copy light curve into device buffer const size_t curve_bytes = lengths[i] * sizeof(float); gpuErrchk(hipMemcpy(dev_times_buffer, times[i], curve_bytes, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(dev_mags_buffer, mags[i], curve_bytes, hipMemcpyHostToDevice)); // Zero conditional entropy output gpuErrchk(hipMemset(dev_ces, 0, ce_out_size)); // NOTE: A ConditionalEntropy object is small enough that we can pass it // in the registers by dereferencing it. hipLaunchKernelGGL(( FoldBinKernel), dim3(grid_dim_fb), dim3(num_threads_fb), shared_bytes_fb, 0, dev_times_buffer, dev_mags_buffer, lengths[i], dev_periods, dev_period_dts, *this, dev_hists); hipLaunchKernelGGL(( ConditionalEntropyKernel), dim3(num_blocks_ce), dim3(num_threads_ce), shared_bytes_ce, 0, dev_hists, num_hists, *this, dev_ces); // Copy CE data back to host gpuErrchk(hipMemcpy(&ce_host[i * num_hists], dev_ces, ce_out_size, hipMemcpyDeviceToHost)); } // Free all of the GPU memory gpuErrchk(hipFree(dev_periods)); gpuErrchk(hipFree(dev_period_dts)); gpuErrchk(hipFree(dev_hists)); gpuErrchk(hipFree(dev_ces)); gpuErrchk(hipFree(dev_times_buffer)); gpuErrchk(hipFree(dev_mags_buffer)); return ce_host; }
4be2b590dfbe46a9ddba6519e53da62b038652e6.cu
// Copyright 2020 California Institute of Technology. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Author: Ethan Jaszewski #include "ce.h" #include <algorithm> #include <iostream> #include "cuda_runtime.h" #include "math.h" #include "errchk.cuh" // // Simple ConditionalEntropy Function Definitions // ConditionalEntropy::ConditionalEntropy(size_t n_phase, size_t n_mag, size_t p_overlap, size_t m_overlap) { // Just set number of bins num_phase_bins = n_phase; num_mag_bins = n_mag; // Just set the overlap num_phase_overlap = p_overlap; num_mag_overlap = m_overlap; // Calculate bin size accordingly phase_bin_size = 1.0 / static_cast<float>(n_phase); mag_bin_size = 1.0 / static_cast<float>(n_mag); } __host__ __device__ size_t ConditionalEntropy::NumBins() const { return num_phase_bins * num_mag_bins; } __host__ __device__ size_t ConditionalEntropy::NumPhaseBins() const { return num_phase_bins; } __host__ __device__ size_t ConditionalEntropy::NumMagBins() const { return num_mag_bins; } __host__ __device__ size_t ConditionalEntropy::NumPhaseBinOverlap() const { return num_phase_overlap; } __host__ __device__ size_t ConditionalEntropy::NumMagBinOverlap() const { return num_mag_overlap; } __host__ __device__ float ConditionalEntropy::PhaseBinSize() const { return phase_bin_size; } __host__ __device__ float ConditionalEntropy::MagBinSize() const { return mag_bin_size; } __host__ __device__ size_t ConditionalEntropy::PhaseBin(float phase_val) const { return static_cast<size_t>(phase_val / phase_bin_size); } __host__ __device__ size_t ConditionalEntropy::MagBin(float mag_val) const { return static_cast<size_t>(mag_val / mag_bin_size); } __host__ __device__ size_t ConditionalEntropy::BinIndex(size_t phase_bin, size_t mag_bin) const { return phase_bin * num_mag_bins + mag_bin; } // // CUDA Kernels // /** * Folds and bins the input data across all trial periods and time derivatives. * * This kernel takes in a time-series of paired times and magnitudes, folding * the times according to the given trial periods and time derivatives, * outputting a series of histograms into global memory. * * Each block computes a histogram of the full data series for a given period * and period time derivative. As such, the x-dimension of the grid should match * the number of trial periods, and the y-dimension of the grid should match the * number of trial period time derivatives. * * Internally, the kernel uses shared memory atomics with a 32-bit integer based * histogram, which requires a total of 4 * Histogram Size bytes of shared * memory. Due to the use of shared atomics, this kernel will perform poorly on * pre-Maxwell GPUs. * * Note: All arrays must be device-allocated * * @param times light curve datapoint times * @param mags light curve datapoint magnitudes * @param periods list of trial periods * @param period_dts list of trial period time derivatives * @param h_params histogram parameters * @param hists array of output histograms */ __global__ void FoldBinKernel(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const ConditionalEntropy h_params, float* hists) { // Histogram which this block will produce. const size_t block_id = blockIdx.x * gridDim.y + blockIdx.y; float* block_hist = &hists[h_params.NumBins() * block_id]; // Period and period time derivative for this block. 
const float period = periods[blockIdx.x]; const float period_dt = period_dts[blockIdx.y]; // Time derivative correction factor. const float pdt_corr = (period_dt / period) / 2; // Shared memory histogram for this thread. extern __shared__ uint32_t sh_hist[]; // Zero the shared memory for this block for (size_t i = threadIdx.x; i < h_params.NumBins(); i += blockDim.x) { sh_hist[i] = 0; } __syncthreads(); float i_part; // Only used for modff. // Accumulate into this thread's histogram (as many points as needed), // simultaneously computing the folded time value for (size_t idx = threadIdx.x; idx < length; idx += blockDim.x) { float t = times[idx]; float t_corr = t - pdt_corr * t * t; float folded = fabsf(modff(t_corr / period, &i_part)); size_t phase_bin = h_params.PhaseBin(folded); size_t mag_bin = h_params.MagBin(mags[idx]); for (size_t i = 0; i < h_params.NumPhaseBinOverlap(); i++) { for (size_t j = 0; j < h_params.NumMagBinOverlap(); j++) { size_t idx = h_params.BinIndex((phase_bin + i) % h_params.NumPhaseBins(), (mag_bin + j) % h_params.NumMagBins()); atomicAdd(&sh_hist[idx], 1); } } } __syncthreads(); size_t div = length * h_params.NumPhaseBinOverlap() * h_params.NumMagBinOverlap(); // Copy the block's histogram into global memory for (size_t i = threadIdx.x; i < h_params.NumBins(); i += blockDim.x) { block_hist[i] = static_cast<float>(sh_hist[i]) / static_cast<float>(div); } } /** * Computes the conditional entropy for the input histograms. * * This kernel takes in an arbitrarily long list of histograms with a given set * of parameters and computes the conditional entropy for each histogram, * outputting a series of values into an array. * * Internally, each thread is responsible for first computing the conditional * entropy of one phase bin of the input (disregarding histogram boundaries), * then the values for each thread are accumulated directly into global memory * to avoid potential inter-block conflicts. The computation uses shared memory * equal to 4 * Number of Threads bytes. * * Note: All arrays must be device-allocated * * @param hists array of input histograms * @param num_hists number of histograms * @param h_params histogram parameters * @param ce_vals output array of conditional entropy values */ __global__ void ConditionalEntropyKernel(const float* hists, const size_t num_hists, const ConditionalEntropy h_params, float* ce_vals) { // Shared memory scratch space extern __shared__ float scratch[]; // Which histogram row this thread is summing size_t idx = blockIdx.x * blockDim.x + threadIdx.x; // Don't compute for out-of-bounds histograms if (idx / h_params.NumPhaseBins() >= num_hists) { return; } // Which shared memory location this thread uses size_t tid = threadIdx.x; // Index in the histogram array corresponding to the start of this row const size_t offset = idx * h_params.NumMagBins(); // Accumulate into shared memory (compute p(phi_j)) scratch[tid] = 0; for (size_t i = 0; i < h_params.NumMagBins(); i++) { scratch[tid] += hists[i + offset]; } // Compute per-phase-bin conditional entropy // TODO: remove use of global mem? float p_j = scratch[tid]; // Store p_j scratch[tid] = 0; // Reset shmem before summing for (size_t i = 0; i < h_params.NumMagBins(); i++) { float p_ij = hists[i + offset]; if (p_ij != 0) { scratch[tid] += p_ij * logf(p_j / p_ij); } } // Accumulate per-phase-bin conditional entropy into total conditional // entropy for the histogram. // TODO: Replace with shared memory reduction of some kind. 
*yikes* size_t ce_idx = idx / h_params.NumPhaseBins(); atomicAdd(&ce_vals[ce_idx], scratch[tid]); } // // Wrapper Functions // float* ConditionalEntropy::DeviceFoldAndBin(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of global memory required to store output size_t bytes = NumBins() * sizeof(float) * num_periods * num_p_dts; // Allocate and zero global memory for output histograms float* dev_hists; gpuErrchk(cudaMalloc(&dev_hists, bytes)); gpuErrchk(cudaMemset(dev_hists, 0, bytes)); // Number of threads and corresponding shared memory usage const size_t num_threads = 512; const size_t shared_bytes = NumBins() * sizeof(uint32_t); // Grid to search over periods and time derivatives const dim3 grid_dim = dim3(num_periods, num_p_dts); // NOTE: A ConditionalEntropy object is small enough that we can pass it in // the registers by dereferencing it. FoldBinKernel<<<grid_dim, num_threads, shared_bytes>>>( times, mags, length, periods, period_dts, *this, dev_hists); return dev_hists; } float* ConditionalEntropy::FoldAndBin(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of input data const size_t data_bytes = length * sizeof(float); // Allocate device pointers float* dev_times; float* dev_mags; float* dev_periods; float* dev_period_dts; gpuErrchk(cudaMalloc(&dev_times, data_bytes)); gpuErrchk(cudaMalloc(&dev_mags, data_bytes)); gpuErrchk(cudaMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(cudaMalloc(&dev_period_dts, num_p_dts * sizeof(float))); // Copy data to device memory gpuErrchk(cudaMemcpy(dev_times, times, data_bytes, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_mags, mags, data_bytes, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_periods, periods, num_periods * sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_periods, period_dts, num_p_dts * sizeof(float), cudaMemcpyHostToDevice)); float* dev_hists = DeviceFoldAndBin(dev_times, dev_mags, length, dev_periods, dev_period_dts, num_periods, num_p_dts); // Allocate host histograms and copy from device size_t bytes = NumBins() * num_periods * num_p_dts * sizeof(float); float* hists = (float*)malloc(bytes); gpuErrchk(cudaMemcpy(hists, dev_hists, bytes, cudaMemcpyDeviceToHost)); // Free GPU memory gpuErrchk(cudaFree(dev_times)); gpuErrchk(cudaFree(dev_mags)); gpuErrchk(cudaFree(dev_periods)); gpuErrchk(cudaFree(dev_period_dts)); gpuErrchk(cudaFree(dev_hists)); return hists; } float* ConditionalEntropy::DeviceCalcCEFromHists(const float* hists, const size_t num_hists) const { // Allocate global memory for output conditional entropy values float* dev_ces; gpuErrchk(cudaMalloc(&dev_ces, num_hists * sizeof(float))); const size_t n_t = 512; const size_t n_b = ((num_hists * NumPhaseBins()) / n_t) + 1; // NOTE: A ConditionalEntropy object is small enough that we can pass it in // the registers by dereferencing it. 
ConditionalEntropyKernel<<<n_b, n_t, n_t * sizeof(float)>>>( hists, num_hists, *this, dev_ces); return dev_ces; } float* ConditionalEntropy::CalcCEFromHists(const float* hists, const size_t num_hists) const { // Number of bytes in the histogram const size_t bytes = num_hists * NumBins() * sizeof(float); // Allocate device memory for histograms and copy over float* dev_hists; gpuErrchk(cudaMalloc(&dev_hists, bytes)); gpuErrchk(cudaMemcpy(dev_hists, hists, bytes, cudaMemcpyHostToDevice)); float* dev_ces = DeviceCalcCEFromHists(dev_hists, num_hists); // Copy CEs to host float* ces = (float*)malloc(num_hists * sizeof(float)); gpuErrchk(cudaMemcpy(ces, dev_ces, num_hists * sizeof(float), cudaMemcpyDeviceToHost)); // Free GPU memory gpuErrchk(cudaFree(dev_hists)); gpuErrchk(cudaFree(dev_ces)); return ces; } float* ConditionalEntropy::CalcCEVals(const float* times, const float* mags, const size_t length, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // Number of bytes of input data const size_t data_bytes = length * sizeof(float); const size_t num_hists = num_periods * num_p_dts; // Allocate device pointers float* dev_times; float* dev_mags; float* dev_periods; float* dev_period_dts; gpuErrchk(cudaMalloc(&dev_times, data_bytes)); gpuErrchk(cudaMalloc(&dev_mags, data_bytes)); gpuErrchk(cudaMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(cudaMalloc(&dev_period_dts, num_p_dts * sizeof(float))); // Copy data to device memory gpuErrchk(cudaMemcpy(dev_times, times, data_bytes, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_mags, mags, data_bytes, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_periods, periods, num_periods * sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float), cudaMemcpyHostToDevice)); float* dev_hists = DeviceFoldAndBin(dev_times, dev_mags, length, dev_periods, dev_period_dts, num_periods, num_p_dts); float* dev_ces = DeviceCalcCEFromHists(dev_hists, num_hists); // Copy CEs to host float* ces = (float*)malloc(num_hists * sizeof(float)); gpuErrchk(cudaMemcpy(ces, dev_ces, num_hists * sizeof(float), cudaMemcpyDeviceToHost)); // Free intermediate and output values gpuErrchk(cudaFree(dev_hists)); gpuErrchk(cudaFree(dev_ces)); // Free GPU inputs gpuErrchk(cudaFree(dev_times)); gpuErrchk(cudaFree(dev_mags)); gpuErrchk(cudaFree(dev_periods)); gpuErrchk(cudaFree(dev_period_dts)); return ces; } float* ConditionalEntropy::CalcCEValsBatched(const std::vector<float*>& times, const std::vector<float*>& mags, const std::vector<size_t>& lengths, const float* periods, const float* period_dts, const size_t num_periods, const size_t num_p_dts) const { // TODO: Use async memory transferring // TODO: Look at ways of batching data transfer. // Size of one CE out array, and total CE output size. size_t ce_out_size = num_periods * num_p_dts * sizeof(float); size_t ce_size_total = ce_out_size * lengths.size(); // Allocate the output CE array so we can copy to it. 
float* ce_host = (float*)malloc(ce_size_total); // Copy trial information over float* dev_periods; float* dev_period_dts; gpuErrchk(cudaMalloc(&dev_periods, num_periods * sizeof(float))); gpuErrchk(cudaMalloc(&dev_period_dts, num_p_dts * sizeof(float))); gpuErrchk(cudaMemcpy(dev_periods, periods, num_periods * sizeof(float), cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_period_dts, period_dts, num_p_dts * sizeof(float), cudaMemcpyHostToDevice)); // Intermediate histogram memory size_t num_hists = num_periods * num_p_dts; size_t hist_bytes = NumBins() * sizeof(float) * num_hists; float* dev_hists; gpuErrchk(cudaMalloc(&dev_hists, hist_bytes)); // Intermediate conditional entropy memory float* dev_ces; gpuErrchk(cudaMalloc(&dev_ces, ce_out_size)); // Kernel launch information for the fold & bin step const size_t num_threads_fb = 256; const size_t shared_bytes_fb = NumBins() * sizeof(uint32_t); const dim3 grid_dim_fb = dim3(num_periods, num_p_dts); // Kernel launch information for the ce calculation step const size_t num_threads_ce = 256; const size_t num_blocks_ce = ((num_hists * NumPhaseBins()) / num_threads_ce) + 1; const size_t shared_bytes_ce = num_threads_ce * sizeof(float); // Buffer size (large enough for longest light curve) auto max_length = std::max_element(lengths.begin(), lengths.end()); const size_t buffer_length = *max_length; const size_t buffer_bytes = sizeof(float) * buffer_length; float* dev_times_buffer; float* dev_mags_buffer; gpuErrchk(cudaMalloc(&dev_times_buffer, buffer_bytes)); gpuErrchk(cudaMalloc(&dev_mags_buffer, buffer_bytes)); for (size_t i = 0; i < lengths.size(); i++) { // Copy light curve into device buffer const size_t curve_bytes = lengths[i] * sizeof(float); gpuErrchk(cudaMemcpy(dev_times_buffer, times[i], curve_bytes, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(dev_mags_buffer, mags[i], curve_bytes, cudaMemcpyHostToDevice)); // Zero conditional entropy output gpuErrchk(cudaMemset(dev_ces, 0, ce_out_size)); // NOTE: A ConditionalEntropy object is small enough that we can pass it // in the registers by dereferencing it. FoldBinKernel<<<grid_dim_fb, num_threads_fb, shared_bytes_fb>>>( dev_times_buffer, dev_mags_buffer, lengths[i], dev_periods, dev_period_dts, *this, dev_hists); ConditionalEntropyKernel<<<num_blocks_ce, num_threads_ce, shared_bytes_ce>>>(dev_hists, num_hists, *this, dev_ces); // Copy CE data back to host gpuErrchk(cudaMemcpy(&ce_host[i * num_hists], dev_ces, ce_out_size, cudaMemcpyDeviceToHost)); } // Free all of the GPU memory gpuErrchk(cudaFree(dev_periods)); gpuErrchk(cudaFree(dev_period_dts)); gpuErrchk(cudaFree(dev_hists)); gpuErrchk(cudaFree(dev_ces)); gpuErrchk(cudaFree(dev_times_buffer)); gpuErrchk(cudaFree(dev_mags_buffer)); return ce_host; }
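// --- Editor's note (hedged sketch, not part of the original ce.cu above) ---
// ConditionalEntropyKernel above accumulates, for each phase bin j,
//     sum_i  p(i,j) * log( p(j) / p(i,j) ),   with  p(j) = sum_i p(i,j),
// and then sums the per-phase-bin values into one conditional entropy per
// histogram. The host-side function below is a small sketch of the same
// arithmetic on a single already-normalized histogram laid out the way
// BinIndex() lays it out (phase-major, magnitude-minor). The function name and
// layout assumptions are illustrative, not part of the library API.
#if 0
#include <cmath>
#include <cstddef>

float ConditionalEntropyHostSketch(const float* hist,
                                   std::size_t num_phase_bins,
                                   std::size_t num_mag_bins) {
    float ce = 0.0f;
    for (std::size_t j = 0; j < num_phase_bins; ++j) {
        const float* row = &hist[j * num_mag_bins];   // matches BinIndex(j, i)
        float p_j = 0.0f;
        for (std::size_t i = 0; i < num_mag_bins; ++i) p_j += row[i];
        for (std::size_t i = 0; i < num_mag_bins; ++i) {
            float p_ij = row[i];
            if (p_ij != 0.0f) ce += p_ij * std::log(p_j / p_ij);
        }
    }
    return ce;
}
#endif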
6ddc0f76ddc9064669938588fa119560bc4e5efb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Naive matrix-matrix multiplication(mmm) By C. Liao */ #include <stdio.h> #include <assert.h> #ifdef _OPENMP #include <omp.h> #endif #define N 1024 #define M 1024 #define K 1024 #define REAL float #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" int i; int j; int k; float a[1024][1024]; float b[1024][1024]; float c[1024][1024]; float c2[1024][1024]; int init(); int mmm(); int mmm2(); int verify(); int main() { xomp_acc_init(); init(); mmm(); mmm2(); return verify(); } int init() { for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) a[i][j] = (3.0 * i * j / 1024 / 1024); for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) b[i][j] = (5.0 * j * i / 1024 / 1024); for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) { c[i][j] = 0.0; c2[i][j] = 0.0; } return 0; } /* TODO: try different i,j,k orders a b e f a*e+ b*g , a*f+ b*h c d x g h = c*e+ d*g, c*f+ d*h */ __global__ void OUT__1__11560__(float *_dev_a,float *_dev_b,float *_dev_c) { int _p_i; int _p_j; int _p_k; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,1023,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,1023,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) { for (_p_j = 0; _p_j < 1024; _p_j++) for (_p_k = 0; _p_k < 1024; _p_k++) _dev_c[_p_i * 1024 + _p_j] = _dev_c[_p_i * 1024 + _p_j] + _dev_a[_p_i * 1024 + _p_k] * _dev_b[_p_k * 1024 + _p_j]; } } int mmm() { //For static arrays with known dimension info. , no array section info. is needed //#pragma omp target map(tofrom:c[0:N][0:M]), map(to:a[0:N][0:M],b[0:M][0:K]) { xomp_deviceDataEnvironmentEnter(0); float *_dev_a; int _dev_a_size[2] = {1024, 1024}; int _dev_a_offset[2] = {0, 0}; int _dev_a_Dim[2] = {1024, 1024}; _dev_a = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)a,2,sizeof(float ),_dev_a_size,_dev_a_offset,_dev_a_Dim,1,0))); float *_dev_b; int _dev_b_size[2] = {1024, 1024}; int _dev_b_offset[2] = {0, 0}; int _dev_b_Dim[2] = {1024, 1024}; _dev_b = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)b,2,sizeof(float ),_dev_b_size,_dev_b_offset,_dev_b_Dim,1,0))); float *_dev_c; int _dev_c_size[2] = {1024, 1024}; int _dev_c_offset[2] = {0, 0}; int _dev_c_Dim[2] = {1024, 1024}; _dev_c = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)c,2,sizeof(float ),_dev_c_size,_dev_c_offset,_dev_c_Dim,1,1))); /* Launch CUDA kernel ... 
*/ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0); int _num_blocks_ = xomp_get_max1DBlock(0,1023 - 0 + 1); hipLaunchKernelGGL(( OUT__1__11560__), dim3(_num_blocks_),dim3(_threads_per_block_), 0, 0, _dev_a,_dev_b,_dev_c); xomp_deviceDataEnvironmentExit(0); } return 0; } int mmm2() { for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) for (k = 0; k < 1024; k++) c2[i][j] = c2[i][j] + a[i][k] * b[k][j]; return 0; } int verify() { float sum = 0.0; float sum2 = 0.0; for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) { sum += c[i][j]; sum2 += c2[i][j]; } printf("sum of c[i][j] is %f\n",sum); printf("sum of c2[i][j] is %f\n",sum2); sum == sum2?((void )0) : __assert_fail("sum == sum2","matrixmultiply-ompacc.c",94,__PRETTY_FUNCTION__); return 0; }
6ddc0f76ddc9064669938588fa119560bc4e5efb.cu
/* Naive matrix-matrix multiplication(mmm) By C. Liao */ #include <stdio.h> #include <assert.h> #ifdef _OPENMP #include <omp.h> #endif #define N 1024 #define M 1024 #define K 1024 #define REAL float #include "libxomp.h" #include "xomp_cuda_lib_inlined.cu" int i; int j; int k; float a[1024][1024]; float b[1024][1024]; float c[1024][1024]; float c2[1024][1024]; int init(); int mmm(); int mmm2(); int verify(); int main() { xomp_acc_init(); init(); mmm(); mmm2(); return verify(); } int init() { for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) a[i][j] = (3.0 * i * j / 1024 / 1024); for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) b[i][j] = (5.0 * j * i / 1024 / 1024); for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) { c[i][j] = 0.0; c2[i][j] = 0.0; } return 0; } /* TODO: try different i,j,k orders a b e f a*e+ b*g , a*f+ b*h c d x g h = c*e+ d*g, c*f+ d*h */ __global__ void OUT__1__11560__(float *_dev_a,float *_dev_b,float *_dev_c) { int _p_i; int _p_j; int _p_k; int _dev_lower; int _dev_upper; int _dev_loop_chunk_size; int _dev_loop_sched_index; int _dev_loop_stride; int _dev_thread_num = getCUDABlockThreadCount(1); int _dev_thread_id = getLoopIndexFromCUDAVariables(1); XOMP_static_sched_init(0,1023,1,1,_dev_thread_num,_dev_thread_id,&_dev_loop_chunk_size,&_dev_loop_sched_index,&_dev_loop_stride); while(XOMP_static_sched_next(&_dev_loop_sched_index,1023,1,_dev_loop_stride,_dev_loop_chunk_size,_dev_thread_num,_dev_thread_id,&_dev_lower,&_dev_upper)) for (_p_i = _dev_lower; _p_i <= _dev_upper; _p_i += 1) { for (_p_j = 0; _p_j < 1024; _p_j++) for (_p_k = 0; _p_k < 1024; _p_k++) _dev_c[_p_i * 1024 + _p_j] = _dev_c[_p_i * 1024 + _p_j] + _dev_a[_p_i * 1024 + _p_k] * _dev_b[_p_k * 1024 + _p_j]; } } int mmm() { //For static arrays with known dimension info. , no array section info. is needed //#pragma omp target map(tofrom:c[0:N][0:M]), map(to:a[0:N][0:M],b[0:M][0:K]) { xomp_deviceDataEnvironmentEnter(0); float *_dev_a; int _dev_a_size[2] = {1024, 1024}; int _dev_a_offset[2] = {0, 0}; int _dev_a_Dim[2] = {1024, 1024}; _dev_a = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)a,2,sizeof(float ),_dev_a_size,_dev_a_offset,_dev_a_Dim,1,0))); float *_dev_b; int _dev_b_size[2] = {1024, 1024}; int _dev_b_offset[2] = {0, 0}; int _dev_b_Dim[2] = {1024, 1024}; _dev_b = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)b,2,sizeof(float ),_dev_b_size,_dev_b_offset,_dev_b_Dim,1,0))); float *_dev_c; int _dev_c_size[2] = {1024, 1024}; int _dev_c_offset[2] = {0, 0}; int _dev_c_Dim[2] = {1024, 1024}; _dev_c = ((float *)(xomp_deviceDataEnvironmentPrepareVariable(0,(void *)c,2,sizeof(float ),_dev_c_size,_dev_c_offset,_dev_c_Dim,1,1))); /* Launch CUDA kernel ... */ int _threads_per_block_ = xomp_get_maxThreadsPerBlock(0); int _num_blocks_ = xomp_get_max1DBlock(0,1023 - 0 + 1); OUT__1__11560__<<<_num_blocks_,_threads_per_block_>>>(_dev_a,_dev_b,_dev_c); xomp_deviceDataEnvironmentExit(0); } return 0; } int mmm2() { for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) for (k = 0; k < 1024; k++) c2[i][j] = c2[i][j] + a[i][k] * b[k][j]; return 0; } int verify() { float sum = 0.0; float sum2 = 0.0; for (i = 0; i < 1024; i++) for (j = 0; j < 1024; j++) { sum += c[i][j]; sum2 += c2[i][j]; } printf("sum of c[i][j] is %f\n",sum); printf("sum of c2[i][j] is %f\n",sum2); sum == sum2?((void )0) : __assert_fail("sum == sum2","matrixmultiply-ompacc.c",94,__PRETTY_FUNCTION__); return 0; }
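The two files above are ROSE/XOMP compiler output, which makes the data-mapping and scheduling boilerplate hard to read. For orientation, here is a sketch of what the pre-translation OpenMP accelerator-model body of mmm() probably looked like, based on the commented-out map clause left in the generated code; the parallel-for construct and private clause are guesses, not taken from the source. Note also that verify() asserts exact floating-point equality of the two sums, which can be fragile when the GPU and CPU accumulate in different orders.

/* Sketch (assumption): likely pre-translation OpenMP accelerator source for
 * the body of mmm(). The map clause is quoted from the comment in the
 * generated code; the parallel-for construct and private clause are guesses. */
#pragma omp target map(tofrom: c[0:N][0:M]) map(to: a[0:N][0:M], b[0:M][0:K])
#pragma omp parallel for private(j, k)
for (i = 0; i < N; i++)
    for (j = 0; j < M; j++)
        for (k = 0; k < K; k++)
            c[i][j] = c[i][j] + a[i][k] * b[k][j];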
819286de228d5486f5e783379c93cc9698a51120.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipArray *d_transferFuncArray; texture<uchar, 3, hipReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, hipReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | 
uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { int maxSteps = 500; float tstep = 0.01f; float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from back to front, accumulating color float4 sum = make_float4(0.0f);; float t = tfar; for(int i=0; i<maxSteps; i++) { float3 pos = eyeRay.o + eyeRay.d*t; pos = pos*0.5f+0.5f; // map position to [0, 1] coordinates // read from 3D texture float sample = tex3D(tex, pos.x, pos.y, pos.z); // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); // accumulate result sum = lerp(sum, col, col.w*density); t -= tstep; if (t < tnear) break; } sum *= brightness; if ((x < imageW) && (y < imageH)) { // write output color uint i = __umul24(y, imageW) + x; d_output[i] = rgbaFloatToInt(sum); } } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; } extern "C" void initCuda(uchar *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar>(); cutilSafeCall( hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr((void*)h_volume, volumeSize.width*sizeof(uchar), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; cutilSafeCall( hipMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = hipFilterModeLinear; // linear interpolation tex.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates tex.addressMode[1] = hipAddressModeClamp; // bind array to 3D texture cutilSafeCall(hipBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray* d_transferFuncArray; cutilSafeCall(hipMallocArray( &d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); cutilSafeCall(hipMemcpyToArray( d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), hipMemcpyHostToDevice)); transferTex.filterMode = hipFilterModeLinear; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates // Bind the array to the texture cutilSafeCall( hipBindTextureToArray( transferTex, 
d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { cutilSafeCall(hipFreeArray(d_volumeArray)); cutilSafeCall(hipFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { cutilSafeCall( hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
819286de228d5486f5e783379c93cc9698a51120.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <cutil_inline.h> #include <cutil_math.h> typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaArray *d_transferFuncArray; texture<uchar, 3, cudaReadModeNormalizedFloat> tex; // 3D texture texture<float4, 1, cudaReadModeElementType> transferTex; // 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, 
float density, float brightness, float transferOffset, float transferScale) { int maxSteps = 500; float tstep = 0.01f; float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; uint y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from back to front, accumulating color float4 sum = make_float4(0.0f);; float t = tfar; for(int i=0; i<maxSteps; i++) { float3 pos = eyeRay.o + eyeRay.d*t; pos = pos*0.5f+0.5f; // map position to [0, 1] coordinates // read from 3D texture float sample = tex3D(tex, pos.x, pos.y, pos.z); // lookup in transfer function texture float4 col = tex1D(transferTex, (sample-transferOffset)*transferScale); // accumulate result sum = lerp(sum, col, col.w*density); t -= tstep; if (t < tnear) break; } sum *= brightness; if ((x < imageW) && (y < imageH)) { // write output color uint i = __umul24(y, imageW) + x; d_output[i] = rgbaFloatToInt(sum); } } extern "C" void setTextureFilterMode(bool bLinearFilter) { tex.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; } extern "C" void initCuda(uchar *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar>(); cutilSafeCall( cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize) ); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr((void*)h_volume, volumeSize.width*sizeof(uchar), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; cutilSafeCall( cudaMemcpy3D(&copyParams) ); // set texture parameters tex.normalized = true; // access with normalized texture coordinates tex.filterMode = cudaFilterModeLinear; // linear interpolation tex.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates tex.addressMode[1] = cudaAddressModeClamp; // bind array to 3D texture cutilSafeCall(cudaBindTextureToArray(tex, d_volumeArray, channelDesc)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray* d_transferFuncArray; cutilSafeCall(cudaMallocArray( &d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); cutilSafeCall(cudaMemcpyToArray( d_transferFuncArray, 0, 0, transferFunc, sizeof(transferFunc), cudaMemcpyHostToDevice)); transferTex.filterMode = cudaFilterModeLinear; transferTex.normalized = true; // access with normalized texture coordinates transferTex.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates // Bind the array to the texture cutilSafeCall( cudaBindTextureToArray( transferTex, d_transferFuncArray, channelDesc2)); } extern "C" void freeCudaBuffers() { 
cutilSafeCall(cudaFreeArray(d_volumeArray)); cutilSafeCall(cudaFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { d_render<<<gridSize, blockSize>>>( d_output, imageW, imageH, density, brightness, transferOffset, transferScale); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { cutilSafeCall( cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix) ); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
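This sample predates texture objects: it binds array data to global texture<> references and relies on the long-deprecated cutil helpers, so it will not build against current CUDA toolkits without changes. As a rough illustration only (not the sample's actual API), the 1D transfer-function lookup could be expressed with a cudaTextureObject_t along these lines; makeTransferTex is a hypothetical helper name, and the kernel would receive the object as an extra parameter and sample it with tex1D<float4>(texObj, u).

// Rough sketch (assumption): texture-object replacement for the deprecated
// transferTex reference above. Error checking omitted for brevity.
#include <cuda_runtime.h>

cudaTextureObject_t makeTransferTex(cudaArray_t transferArray) {
    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = transferArray;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0]   = cudaAddressModeClamp;
    texDesc.filterMode       = cudaFilterModeLinear;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 1;

    cudaTextureObject_t texObj = 0;
    cudaCreateTextureObject(&texObj, &resDesc, &texDesc, nullptr);
    return texObj;  // sample in device code with tex1D<float4>(texObj, u)
}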
64edf3b10ddeb600f1573360e00b13ecfb3b59c7.hip
// !!! This is a file automatically generated by hipify!!! #include "THHUNN.h" #include "TH/THHalf.h" #include "THHHalfAutoNumerics.cuh" #include <THH/THHApply.cuh> #if defined(_MSC_VER) || defined(__HIP_PLATFORM_HCC__) #define ZERO_MACRO zero<T>() template <typename T> inline __device__ typename std::enable_if<std::is_same<T, double>::value, T>::type zero() { return 0.; } template <typename T> inline __device__ typename std::enable_if<!std::is_same<T, double>::value, T>::type zero() { return 0.f; } #else #define ZERO_MACRO 0.f #endif template <typename T> struct logSigmoid_updateOutput_functor { __device__ void operator()(T *output, const T *input) const { const T max = fmaxType(ZERO_MACRO, -*input); const T z = THCNumerics<T>::exp(-max) + THCNumerics<T>::exp(-*input -max); *output = -(max + THCNumerics<T>::log(z)); } }; template <typename T> struct logSigmoid_updateGradInput_functor { __device__ void operator()(T *gradInput, const T *input, const T *gradOutput) const { const T max = fmaxType(ZERO_MACRO, -*input); const T z = THCNumerics<T>::exp(-max) + THCNumerics<T>::exp(-*input -max); T max_deriv = 0.f; T sign = -1.f; if (*input < 0.f){ max_deriv = -1.f; sign = 1.f; } *gradInput = *gradOutput * (-max_deriv - sign*((z - 1.f)/z)); } }; template <> struct logSigmoid_updateOutput_functor<half> { __device__ __forceinline__ void operator()(half* output, const half *input) const { float in = __half2float(*input); float max = fmaxType(0.f, -in); float z = THCNumerics<float>::exp(-max) + THCNumerics<float>::exp(-in - max); *output = __float2half(-(max + THCNumerics<float>::log(z))); } }; template <> struct logSigmoid_updateGradInput_functor<half> { __device__ __forceinline__ void operator()(half* gradInput, const half *input, const half *gradOutput) const { const float in = __half2float(*input); const float max = fmaxType(0.f, -in); const float z = THCNumerics<float>::exp(-max) + THCNumerics<float>::exp(-in - max); const float go = __half2float(*gradOutput); float max_deriv = 0.f; float sign = -1.f; if(in < 0.f){ max_deriv = -1.f; sign = 1.f; } *gradInput = __float2half(go * (-max_deriv - sign*((z - 1.f)/z))); } }; #include "generic/LogSigmoid.cu" #include "THHGenerateFloatTypes.h"
64edf3b10ddeb600f1573360e00b13ecfb3b59c7.cu
#include "THCUNN.h" #include "TH/THHalf.h" #include "THCHalfAutoNumerics.cuh" #include <THC/THCApply.cuh> #if defined(_MSC_VER) || defined(__HIP_PLATFORM_HCC__) #define ZERO_MACRO zero<T>() template <typename T> inline __device__ typename std::enable_if<std::is_same<T, double>::value, T>::type zero() { return 0.; } template <typename T> inline __device__ typename std::enable_if<!std::is_same<T, double>::value, T>::type zero() { return 0.f; } #else #define ZERO_MACRO 0.f #endif template <typename T> struct logSigmoid_updateOutput_functor { __device__ void operator()(T *output, const T *input) const { const T max = fmaxType(ZERO_MACRO, -*input); const T z = THCNumerics<T>::exp(-max) + THCNumerics<T>::exp(-*input -max); *output = -(max + THCNumerics<T>::log(z)); } }; template <typename T> struct logSigmoid_updateGradInput_functor { __device__ void operator()(T *gradInput, const T *input, const T *gradOutput) const { const T max = fmaxType(ZERO_MACRO, -*input); const T z = THCNumerics<T>::exp(-max) + THCNumerics<T>::exp(-*input -max); T max_deriv = 0.f; T sign = -1.f; if (*input < 0.f){ max_deriv = -1.f; sign = 1.f; } *gradInput = *gradOutput * (-max_deriv - sign*((z - 1.f)/z)); } }; template <> struct logSigmoid_updateOutput_functor<half> { __device__ __forceinline__ void operator()(half* output, const half *input) const { float in = __half2float(*input); float max = fmaxType(0.f, -in); float z = THCNumerics<float>::exp(-max) + THCNumerics<float>::exp(-in - max); *output = __float2half(-(max + THCNumerics<float>::log(z))); } }; template <> struct logSigmoid_updateGradInput_functor<half> { __device__ __forceinline__ void operator()(half* gradInput, const half *input, const half *gradOutput) const { const float in = __half2float(*input); const float max = fmaxType(0.f, -in); const float z = THCNumerics<float>::exp(-max) + THCNumerics<float>::exp(-in - max); const float go = __half2float(*gradOutput); float max_deriv = 0.f; float sign = -1.f; if(in < 0.f){ max_deriv = -1.f; sign = 1.f; } *gradInput = __float2half(go * (-max_deriv - sign*((z - 1.f)/z))); } }; #include "generic/LogSigmoid.cu" #include "THCGenerateFloatTypes.h"
25122622ac75dad3b01ddc26ff71dd77cf8623d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <heteroflow/heteroflow.hpp> #include <vector> #include <cassert> // Compilation: nvcc -O2 -g ./unittest/heteroflow.cu -std=c++14 -I . __global__ void assign_value(int n, float a, float *x) { // Get the corresponding idx int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { x[i] = a; } } __global__ void add(int n, float *x, float *y) { // Get the corresponding idx int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] += x[i]; } } void gpu_only() { const int N = 1 << 12; int num_devices {-1}; hipGetDeviceCount(&num_devices); hf::Executor executor(1, num_devices); hf::Heteroflow hf("unittest"); const int num_graphs = 1000; std::vector<std::vector<float>> xs (num_graphs); std::vector<std::vector<float>> ys (num_graphs); std::vector<std::vector<float>> zs (num_graphs); for(int i=0; i<num_graphs; i++) { auto &x = xs[i]; auto &y = ys[i]; auto &z = zs[i]; x.resize(N, 1.0f); y.resize(N, 2.0f); z.resize(N, 3.0f); auto pull_x = hf.pull(x); auto pull_y = hf.pull(y); auto pull_z = hf.pull(z); auto kx = hf.kernel(assign_value, N, 1.0f, pull_x) .grid_x((N+255)/256) .block_x(256); auto ky = hf.kernel(assign_value, N, 2.0f, pull_y) .grid_x((N+255)/256) .block_x(256); pull_x.precede(kx); pull_y.precede(ky); auto kxy = hf.kernel(add, N, pull_x, pull_y) .grid_x((N+255)/256) .block_x(256).name("KXY") .succeed(kx, ky); auto kzy = hf.kernel(add, N, pull_y, pull_z) .grid_x((N+255)/256) .block_x(256).name("KZY") .succeed(pull_z, kxy); auto push_x = hf.push(pull_x, x).succeed(kzy); auto push_y = hf.push(pull_y, y).succeed(kzy); auto push_z = hf.push(pull_z, z).succeed(kzy); } //std::cout << hf.dump() << "\n"; exit(0); executor.run(hf).wait(); hipDeviceSynchronize(); for(int j=0; j<num_graphs; j++) { for (int i = 0; i < N; i++) { assert(zs[j][i]-6.0f == 0.0f); } } std::cout << "Done\n"; } void mix() { const int N = 1 << 12; int num_devices {-1}; hipGetDeviceCount(&num_devices); hf::Executor executor(40, num_devices); hf::Heteroflow hf("unittest"); const int num_graphs = 1000; std::vector<std::vector<float>> xs (num_graphs); std::vector<std::vector<float>> ys (num_graphs); std::vector<std::vector<float>> zs (num_graphs); std::vector<hf::PullTask> pull_tasks; std::vector<hf::PushTask> push_tasks; for(int i=0; i<num_graphs; i++) { auto &x = xs[i]; auto &y = ys[i]; auto &z = zs[i]; x.resize(N, 1.0f); y.resize(N, 2.0f); z.resize(N, 3.0f); auto pull_x = hf.pull(x); auto pull_y = hf.pull(y); auto pull_z = hf.pull(z); pull_tasks.emplace_back(pull_x); pull_tasks.emplace_back(pull_y); pull_tasks.emplace_back(pull_z); auto kx = hf.kernel(assign_value, N, 1.0f, pull_x) .grid_x((N+255)/256) .block_x(256); auto ky = hf.kernel(assign_value, N, 2.0f, pull_y) .grid_x((N+255)/256) .block_x(256); pull_x.precede(kx); pull_y.precede(ky); auto kxy = hf.kernel(add, N, pull_x, pull_y) .grid_x((N+255)/256) .block_x(256).name("KXY") .succeed(kx, ky); auto kzy = hf.kernel(add, N, pull_y, pull_z) .grid_x((N+255)/256) .block_x(256).name("KZY") .succeed(pull_z, kxy); auto push_x = hf.push(pull_x, x).succeed(kzy); auto push_y = hf.push(pull_y, y).succeed(kzy); auto push_z = hf.push(pull_z, z).succeed(kzy); push_tasks.emplace_back(push_x); push_tasks.emplace_back(push_y); push_tasks.emplace_back(push_z); } //std::cout << hf.dump() << "\n"; exit(0); // Create CPU tasks const int num_cpu_tasks = pull_tasks.size() << 2; std::atomic<int> cpu_counter {0}; std::vector<hf::HostTask> host_tasks; for(int i=0 ; i<num_cpu_tasks; i++) { auto host_task 
= hf.host([&](){ cpu_counter++; }); host_task.precede(pull_tasks[i%pull_tasks.size()]); } executor.run(hf).wait(); hipDeviceSynchronize(); for(int j=0; j<num_graphs; j++) { for (int i = 0; i < N; i++) { assert(zs[j][i]-6.0f == 0.0f); } } assert(cpu_counter.load() == num_cpu_tasks); std::cout << "Done\n"; } int main(int argc, char* argv[]) { mix(); }
25122622ac75dad3b01ddc26ff71dd77cf8623d0.cu
#include <heteroflow/heteroflow.hpp> #include <vector> #include <cassert> // Compilation: nvcc -O2 -g ./unittest/heteroflow.cu -std=c++14 -I . __global__ void assign_value(int n, float a, float *x) { // Get the corresponding idx int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { x[i] = a; } } __global__ void add(int n, float *x, float *y) { // Get the corresponding idx int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { y[i] += x[i]; } } void gpu_only() { const int N = 1 << 12; int num_devices {-1}; cudaGetDeviceCount(&num_devices); hf::Executor executor(1, num_devices); hf::Heteroflow hf("unittest"); const int num_graphs = 1000; std::vector<std::vector<float>> xs (num_graphs); std::vector<std::vector<float>> ys (num_graphs); std::vector<std::vector<float>> zs (num_graphs); for(int i=0; i<num_graphs; i++) { auto &x = xs[i]; auto &y = ys[i]; auto &z = zs[i]; x.resize(N, 1.0f); y.resize(N, 2.0f); z.resize(N, 3.0f); auto pull_x = hf.pull(x); auto pull_y = hf.pull(y); auto pull_z = hf.pull(z); auto kx = hf.kernel(assign_value, N, 1.0f, pull_x) .grid_x((N+255)/256) .block_x(256); auto ky = hf.kernel(assign_value, N, 2.0f, pull_y) .grid_x((N+255)/256) .block_x(256); pull_x.precede(kx); pull_y.precede(ky); auto kxy = hf.kernel(add, N, pull_x, pull_y) .grid_x((N+255)/256) .block_x(256).name("KXY") .succeed(kx, ky); auto kzy = hf.kernel(add, N, pull_y, pull_z) .grid_x((N+255)/256) .block_x(256).name("KZY") .succeed(pull_z, kxy); auto push_x = hf.push(pull_x, x).succeed(kzy); auto push_y = hf.push(pull_y, y).succeed(kzy); auto push_z = hf.push(pull_z, z).succeed(kzy); } //std::cout << hf.dump() << "\n"; exit(0); executor.run(hf).wait(); cudaDeviceSynchronize(); for(int j=0; j<num_graphs; j++) { for (int i = 0; i < N; i++) { assert(zs[j][i]-6.0f == 0.0f); } } std::cout << "Done\n"; } void mix() { const int N = 1 << 12; int num_devices {-1}; cudaGetDeviceCount(&num_devices); hf::Executor executor(40, num_devices); hf::Heteroflow hf("unittest"); const int num_graphs = 1000; std::vector<std::vector<float>> xs (num_graphs); std::vector<std::vector<float>> ys (num_graphs); std::vector<std::vector<float>> zs (num_graphs); std::vector<hf::PullTask> pull_tasks; std::vector<hf::PushTask> push_tasks; for(int i=0; i<num_graphs; i++) { auto &x = xs[i]; auto &y = ys[i]; auto &z = zs[i]; x.resize(N, 1.0f); y.resize(N, 2.0f); z.resize(N, 3.0f); auto pull_x = hf.pull(x); auto pull_y = hf.pull(y); auto pull_z = hf.pull(z); pull_tasks.emplace_back(pull_x); pull_tasks.emplace_back(pull_y); pull_tasks.emplace_back(pull_z); auto kx = hf.kernel(assign_value, N, 1.0f, pull_x) .grid_x((N+255)/256) .block_x(256); auto ky = hf.kernel(assign_value, N, 2.0f, pull_y) .grid_x((N+255)/256) .block_x(256); pull_x.precede(kx); pull_y.precede(ky); auto kxy = hf.kernel(add, N, pull_x, pull_y) .grid_x((N+255)/256) .block_x(256).name("KXY") .succeed(kx, ky); auto kzy = hf.kernel(add, N, pull_y, pull_z) .grid_x((N+255)/256) .block_x(256).name("KZY") .succeed(pull_z, kxy); auto push_x = hf.push(pull_x, x).succeed(kzy); auto push_y = hf.push(pull_y, y).succeed(kzy); auto push_z = hf.push(pull_z, z).succeed(kzy); push_tasks.emplace_back(push_x); push_tasks.emplace_back(push_y); push_tasks.emplace_back(push_z); } //std::cout << hf.dump() << "\n"; exit(0); // Create CPU tasks const int num_cpu_tasks = pull_tasks.size() << 2; std::atomic<int> cpu_counter {0}; std::vector<hf::HostTask> host_tasks; for(int i=0 ; i<num_cpu_tasks; i++) { auto host_task = hf.host([&](){ cpu_counter++; }); 
host_task.precede(pull_tasks[i%pull_tasks.size()]); } executor.run(hf).wait(); cudaDeviceSynchronize(); for(int j=0; j<num_graphs; j++) { for (int i = 0; i < N; i++) { assert(zs[j][i]-6.0f == 0.0f); } } assert(cpu_counter.load() == num_cpu_tasks); std::cout << "Done\n"; } int main(int argc, char* argv[]) { mix(); }
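For reference, each of the 1000 task graphs built above reduces to a short serial computation; the sketch below (a hypothetical helper, not part of the test) spells out why the final asserts expect z[i] == 6.0f: the kernels set x to 1 and y to 2, KXY folds x into y (y = 3), and KZY folds y into z, whose host-side initial value is 3, giving 6.

// Hypothetical reference computation for one task graph (z starts at 3.0f on
// the host and is only updated on the device, never re-assigned by a kernel).
void reference(int n, float* x, float* y, float* z) {
    for (int i = 0; i < n; i++) { x[i] = 1.f; y[i] = 2.f; }  // kx, ky
    for (int i = 0; i < n; i++) y[i] += x[i];                // KXY: y = 3
    for (int i = 0; i < n; i++) z[i] += y[i];                // KZY: z = 3 + 3 = 6
}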
cbb4ff711f4130869351fc4298ccfdaa0457ef10.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void ShortestPath1(float *Arr1, float *Arr2, int N, int rows, int rank){
    // rows is the number of matrix rows assigned to each process
    // Arr1: input array, holds the current distance for each pair (u,v)
    // Arr2: output array
    int k;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int offset = rows*rank;
    int index = row*N+col;
    int index_ik, index_kj;

    Arr2[index] = Arr1[index];

    for(k = rank*rows; k < ((rank+1)*rows); k++){
        index_ik = row*N+k;
        index_kj = (k-offset)*N+col;
        if(Arr1[index] > (Arr1[index_ik]+Arr1[index_kj])){
            Arr2[index] = Arr1[index_ik]+Arr1[index_kj];
        }
        __syncthreads();
    }
}
cbb4ff711f4130869351fc4298ccfdaa0457ef10.cu
#include "includes.h" __global__ void ShortestPath1(float *Arr1,float *Arr2,int N,int rows, int rank){ //rowNum is number of rows for each process (full assigned to process) //Arr1 input array,Holds of (u,v) //Arr2 output array int k; int col=blockIdx.x * blockDim.x + threadIdx.x; int row=blockIdx.y * blockDim.y + threadIdx.y; int offset=rows*rank; int index=row*N+col; int index_ik,index_kj; Arr2[index]=Arr1[index]; for(k=rank*rows; k<((rank+1)*rows); k++){ index_ik = row*N+k; index_kj = (k-offset)*N+col; if(Arr1[index]>(Arr1[index_ik]+Arr1[index_kj])){ Arr2[index]=Arr1[index_ik]+Arr1[index_kj]; } __syncthreads(); } }