| column | dtype | value lengths |
|---|---|---|
| hip_filename | string | 5 – 84 |
| hip_content | string | 79 – 9.69M |
| cuda_filename | string | 4 – 83 |
| cuda_content | string | 19 – 9.69M |
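For orientation, each row pairs a hipify-generated source file with its CUDA original. A minimal, hypothetical C++ record mirroring the four columns above (the struct and its name are illustrative only, not part of the dataset):

```cpp
#include <string>

// Hypothetical row layout; field names follow the dataset columns.
struct HipifyPair {
  std::string hip_filename;   // 5 - 84 characters
  std::string hip_content;    // 79 bytes - 9.69 MB
  std::string cuda_filename;  // 4 - 83 characters
  std::string cuda_content;   // 19 bytes - 9.69 MB
};
```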
f3105e0890aa174be39a0adc81b81cb0af497b1d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 256; int dimensigrid = 2; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); void CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); } void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); } void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff){ for (int i = 0; i < banyakdata; i++) { enkripsi(m + i, k + i, g, p, y, res + 2 * i, minbuff+i, mulbuff+i); } } void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff){ for (int i = 0; i < banyakdata; i++) { dekripsi(c + 2*i, c + 2*i+1, e, p, res+i, minbuff+i, mulbuff+i); } } void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { big *minbuff, *mulbuff; minbuff = (big*) malloc(banyakdata * sizeof(big)); mulbuff = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++) { minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } clock_t begin = clock(); kernelenk(m, k, g, p, y, res, minbuff, mulbuff); clock_t end = clock(); double time_spent = (double)(end - begin) / 1000; printf("Durasi : %f ms\n", time_spent); } void CUDAdek(big *c, big *e, big* p, big *res) { big *minbuff, *mulbuff; minbuff = (big*) malloc(banyakdata * sizeof(big)); 
mulbuff = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++) { minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } clock_t begin = clock(); kerneldek(c, e, p, res, minbuff, mulbuff); clock_t end = clock(); double time_spent = (double)(end - begin) / 1000; printf("Durasi : %f ms\n", time_spent); } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); CUDAenk(m, k, g, p, y, res); for (int i = 0; i < 5; i++) { printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); } printf("Cipher ... : ...\n"); printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c, big* e,big* p,big* res2){ printf("Decrypting...\n"); CUDAdek(c, e, p, res2); for (int i = 0; i < 5; i++) { printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); } printf("Plain ... : ...\n"); printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 16; p->value = (uint*) malloc(p->size * sizeof(uint)); p->value[0] = UINT_MAX; for (int i = 1; i < p->size; i++) { //p->value[i] = 2357; p->value[i] = rand() % UINT_MAX; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 16; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = rand() % UINT_MAX; } // Kunci privat x x->size = 16; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = rand() % UINT_MAX; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); // printf("e adalah %u\n", e->value[0]); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); // printf("y adalah %u\n",y->value[0]); //========================================================// // Blok plainteks for(int i = 0 ; i < banyakdata ; i++){ m[i].size = 16; m[i].value = (uint*) malloc(m[i].size * sizeof(uint)); for (int j = 0; j < m[i].size; j++) { // m[i].value[j] = 1001; m[i].value[j] = rand() % UINT_MAX; } // Nilai k masing-masing blok k[i].size = 16; k[i].value = (uint*) malloc(k[i].size * sizeof(uint)); for (int j = 0; j < k[i].size; j++) { // k[i].value[j] = 77; k[i].value[j] = rand() % UINT_MAX; } } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size *2); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } } int main(){ big *p, *g, *x, *e, *y, 
*m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = (big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); free(p->value); free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0, lebih = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = res->value[i+j] + aval * bval + lebih; res->value[i+j] = temp % UINT_MAX; lebih = temp / UINT_MAX; } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff); } } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; 
modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb + 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && modres->value[modres->size - 1] == 0) modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) 
malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
f3105e0890aa174be39a0adc81b81cb0af497b1d.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> typedef unsigned long long ul; typedef unsigned int uint; int banyakdata = 256; int dimensigrid = 2; int dimensiblok = 128; typedef struct { char size; uint* value; }big; typedef struct { short size; char* value; }stringnumber; __host__ __device__ short ukuranbit(big *a); __host__ __device__ char getbit(big* a, short count); __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser); __host__ __device__ void kali(big *a, big *b, big* res); __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff); __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff); void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff); void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff); void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff); void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res); void CUDAdek(big *c, big *e, big* p, big *res); void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y); void maindekripsi(big* c,big* x,big* p,big* res2); void tambah(big* a, char b, big* res); void kurang(big* a, big *b, big* res); void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff); void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff); void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2); void copybig(big* a, big* res); void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten); void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff); void printsn(stringnumber* sn); void teskonversi(); void enkripsi(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff) { // BLok 1 Cipher modexp(g,k,p,res,minbuff->value,mulbuff); // Blok 2 Cipher modexp(y, k, p, res + 1,minbuff->value,mulbuff); kali(res + 1, m, mulbuff); modulo(mulbuff, p, res+1, minbuff->value); } void dekripsi(big *c1, big *c2, big *e, big *p, big *res, big *minbuff, big *mulbuff) { modexp(c1,e,p,res,minbuff->value,mulbuff); kali(res, c2, mulbuff); modulo(mulbuff, p, res, minbuff->value); } void kernelenk(big *m, big *k, big *g, big *p, big *y, big *res, big *minbuff, big *mulbuff){ for (int i = 0; i < banyakdata; i++) { enkripsi(m + i, k + i, g, p, y, res + 2 * i, minbuff+i, mulbuff+i); } } void kerneldek(big *c, big *e, big *p, big *res, big *minbuff, big *mulbuff){ for (int i = 0; i < banyakdata; i++) { dekripsi(c + 2*i, c + 2*i+1, e, p, res+i, minbuff+i, mulbuff+i); } } void CUDAenk(big *m, big *k, big* g, big* p, big* y, big *res) { big *minbuff, *mulbuff; minbuff = (big*) malloc(banyakdata * sizeof(big)); mulbuff = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; i < banyakdata; i++) { minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } clock_t begin = clock(); kernelenk(m, k, g, p, y, res, minbuff, mulbuff); clock_t end = clock(); double time_spent = (double)(end - begin) / 1000; printf("Durasi : %f ms\n", time_spent); } void CUDAdek(big *c, big *e, big* p, big *res) { big *minbuff, *mulbuff; minbuff = (big*) malloc(banyakdata * sizeof(big)); mulbuff = (big*) malloc(banyakdata * sizeof(big)); for (int i = 0; 
i < banyakdata; i++) { minbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); mulbuff[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } clock_t begin = clock(); kerneldek(c, e, p, res, minbuff, mulbuff); clock_t end = clock(); double time_spent = (double)(end - begin) / 1000; printf("Durasi : %f ms\n", time_spent); } void mainenkripsi(big *m, big *k, big *res, big *g, big *p, big *y){ printf("Encrypting...\n"); CUDAenk(m, k, g, p, y, res); for (int i = 0; i < 5; i++) { printf("Cipher %d size %d : %u\n",i, res[i].size, res[i].value[0]); } printf("Cipher ... : ...\n"); printf("Cipher %d size %d : %u\n",banyakdata*2-2, res[banyakdata*2-2].size, res[banyakdata*2-2].value[0]); printf("Cipher %d size %d : %u\n",banyakdata*2-1, res[banyakdata*2-2].size, res[banyakdata*2-1].value[0]); } void maindekripsi(big* c, big* e,big* p,big* res2){ printf("Decrypting...\n"); CUDAdek(c, e, p, res2); for (int i = 0; i < 5; i++) { printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[0]); printf("Plain %d size %d : %u\n",i, res2[i].size, res2[i].value[1]); } printf("Plain ... : ...\n"); printf("Plain %d size %d : %u\n",banyakdata-1, res2[banyakdata-1].size, res2[banyakdata-1].value[0]); } void carikunciy(big *g, big *x, big *p, big *y, uint *minbuff, big *mulbuff){ modexp(g,x,p,y,minbuff,mulbuff); } void init(big *p, big *g, big *x, big*e, big *y, big *m, big *k, big *res, big *res2){ // Kunci publik p p->size = 16; p->value = (uint*) malloc(p->size * sizeof(uint)); p->value[0] = UINT_MAX; for (int i = 1; i < p->size; i++) { //p->value[i] = 2357; p->value[i] = rand() % UINT_MAX; } // p->value[0] = UINT_MAX-4; // p->value[0] = 2387; // p->value[1] = 2357; // Kunci publik g g->size = 16; g->value = (uint*) malloc(g->size * sizeof(uint)); for (int i = 0; i < g->size; i++) { // g->value[i] = 2; g->value[i] = rand() % UINT_MAX; } // Kunci privat x x->size = 16; x->value = (uint*) malloc(x->size * sizeof(uint)); for (int i = 0; i < x->size; i++) { // x->value[i] = 1751; x->value[i] = rand() % UINT_MAX; } // Cari nilai eksponen e = (p-x-1) untuk dekripsi big *xplus1 = (big*) malloc(sizeof(big)); xplus1->value = (uint*) malloc(p->size * sizeof(uint)); e->value = (uint*) malloc(p->size * sizeof(uint)); tambah(x, 1, xplus1); kurang(p,xplus1,e); // printf("e adalah %u\n", e->value[0]); free(xplus1->value); free(xplus1); // Cari nilai kunci publik y = (g^x) mod p big* mulbuff = (big*) malloc(sizeof(big)); mulbuff->value = (uint*) malloc(sizeof(uint) * p->size * 2); uint* minbuff = (uint*) malloc(sizeof(uint) * p->size * 2); y->value = (uint*) malloc(p->size * 2 * sizeof(uint)); carikunciy(g,x,p,y,minbuff,mulbuff); // printf("y adalah %u\n",y->value[0]); //========================================================// // Blok plainteks for(int i = 0 ; i < banyakdata ; i++){ m[i].size = 16; m[i].value = (uint*) malloc(m[i].size * sizeof(uint)); for (int j = 0; j < m[i].size; j++) { // m[i].value[j] = 1001; m[i].value[j] = rand() % UINT_MAX; } // Nilai k masing-masing blok k[i].size = 16; k[i].value = (uint*) malloc(k[i].size * sizeof(uint)); for (int j = 0; j < k[i].size; j++) { // k[i].value[j] = 77; k[i].value[j] = rand() % UINT_MAX; } } // Alokasi memori untuk result for (int i = 0; i < banyakdata*2; i++) { res[i].value = (uint*) malloc(sizeof(uint) * p->size *2); } // Alokasi memori untuk result 2 for (int i = 0; i < banyakdata; i++) { res2[i].value = (uint*) malloc(sizeof(uint) * p->size * 2); } } int main(){ big *p, *g, *x, *e, *y, *m, *k, *res, *res2; p = (big*)malloc(sizeof(big)); g = 
(big*)malloc(sizeof(big)); x = (big*)malloc(sizeof(big)); e = (big*)malloc(sizeof(big)); y = (big*)malloc(sizeof(big)); m = (big*)malloc(banyakdata * sizeof(big)); k = (big*)malloc(banyakdata * sizeof(big)); res = (big*)malloc(banyakdata * 2 * sizeof(big)); res2 = (big*)malloc(banyakdata * sizeof(big)); init(p,g,x,e,y,m,k,res,res2); mainenkripsi(m,k,res,g,p,y); printf(" ========================= \n"); maindekripsi(res,e,p,res2); free(p->value); free(p); free(g->value); free(g); free(x->value); free(x); free(e->value); free(e); free(y->value); free(y); free(m->value); free(m); free(k->value); free(k); free(res->value); free(res); free(res2->value); free(res2); //teskonversi(); return 0; } __host__ __device__ short ukuranbit(big *a) { uint lastval = a->value[a->size-1]; short res = 0; while (lastval != 0) { lastval >>= 1; res++; } return res + (a->size - 1) * 32; } __host__ __device__ char getbit(big* a, short count) { return (a->value[count / 32] & ((uint) 1 << (count % 32))) != 0; } __host__ __device__ uint getShiftedBlock(big *num, char noblok, char geser) { uint part1 = (noblok == 0 || geser == 0) ? 0 : (num->value[noblok - 1] >> (32-geser)); uint part2 = (noblok == num->size) ? 0 : (num->value[noblok] << geser); return part1 | part2; } __host__ __device__ void kali(big *a, big *b, big* res) { if (a->size == 0 || b->size == 0) { res->size = 0; return ; } char ukurana = a->size; char ukuranb = b->size; char ukuranres = ukurana + ukuranb; res->size = ukuranres; for (char i = 0; i < ukuranres; i++) { res->value[i] = 0; } for (char i = 0; i < ukurana; i++) { uint aval = a->value[i]; if (aval==0){ continue; } uint lebih = 0; for (char j = 0, lebih = 0; j < ukuranb; j++) { uint bval = b->value[j]; ul temp = res->value[i+j] + aval * bval + lebih; res->value[i+j] = temp % UINT_MAX; lebih = temp / UINT_MAX; } res->value[i+ukuranb] = lebih; } if (res->value[res->size - 1] == 0){ res->size--; } } __host__ __device__ void modexp(big* a, big* b, big* c, big* res, uint* minbuff, big* mulbuff){ res->size = 1; res->value[0] = 1; short i = ukuranbit(b); while (i > 0) { i--; kali(res,res,mulbuff); modulo(mulbuff,c,res,minbuff); if (getbit(b,i)) { kali(res, a, mulbuff); modulo(mulbuff, c, res, minbuff); } } } __host__ __device__ void modulo(big* a, big* b, big* res, uint* minbuff) { res->size = a->size; for(char i = 0 ; i < res->size ;i++){ res->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; res->value[res->size] = 0; res->size++; i = ukurana - ukuranb + 1; while (i > 0) { i--; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = res->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > res->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (res->value[k] == 0); minbuff[k] = res->value[k] - 1; } if (!borrowIn) { while (k > i) { k--; res->value[k] = minbuff[k]; } } } } while (res->size > 0 && res->value[res->size - 1] == 0) res->size--; } void divandmod(big* a, big* &b, big* divres, big* modres, uint* minbuff) { modres->size = a->size; for(char i = 0 ; i < modres->size ;i++){ modres->value[i] = a->value[i]; } if (a->size < b->size) { return ; } char i, j, k; char i2; uint temp ; char borrowIn, borrowOut; char ukurana = a->size; char ukuranb = b->size; modres->value[modres->size] = 0; modres->size++; divres->size = ukurana - ukuranb 
+ 1; for (i = 0; i < divres->size; i++) divres->value[i] = 0; i = ukurana - ukuranb + 1; while (i > 0) { i--; divres->value[i] = 0; i2 = 32; while (i2 > 0) { i2--; for (j = 0, k = i, borrowIn = 0; j <= ukuranb; j++, k++) { temp = modres->value[k] - getShiftedBlock(b, j, i2); borrowOut = (temp > modres->value[k]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } minbuff[k] = temp; borrowIn = borrowOut; } for (; k < ukurana && borrowIn; k++) { borrowIn = (modres->value[k] == 0); minbuff[k] = modres->value[k] - 1; } if (!borrowIn) { divres->value[i] |= ((uint) 1 << i2); while (k > i) { k--; modres->value[k] = minbuff[k]; } } } } if (divres->value[divres->size - 1] == 0) divres->size--; while (modres->size > 0 && modres->value[modres->size - 1] == 0) modres->size--; } void tambah(big* a, char b, big* res) { if (a->size == 0) { res->size = 1; res->value[0] = uint(b); return; } char carryIn = 0; uint temp; res->size = a->size + 1; res->value[0] = a->value[0] + (uint)b; carryIn = (res->value[0] < a->value[0]); char i = 1; for (; i < a->size && carryIn; i++) { temp = a->value[i] + (uint)1; carryIn = (temp == 0); res->value[i] = temp; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (carryIn) res->value[i] = 1; else res->size--; } void kurang(big* a, big *b, big* res) { res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = 0; } if (b->size == 0) { return; } char borrowIn, borrowOut; uint temp; char i; for (i = 0, borrowIn = 0; i < b->size; i++) { temp = a->value[i] - b->value[i]; borrowOut = (temp > a->value[i]); if (borrowIn) { borrowOut |= (temp == 0); temp--; } res->value[i] = temp; borrowIn = borrowOut; } for (; i < a->size && borrowIn; i++) { borrowIn = (a->value[i] == 0); res->value[i] = a->value[i] - 1; } for (; i < a->size; i++) res->value[i] = a->value[i]; if (res->value[res->size - 1] == 0){ res->size--; } } void copybig(big* a, big* res){ res->size = a->size; for (int i = 0; i < res->size; i++){ res->value[i] = a->value[i]; } } void stringtobig(stringnumber* sn, big* res, big* mulbuff, big* ten){ res->size = 0; for (int i = sn->size-1; i >= 0; i--){ kali(res, ten, mulbuff); tambah(mulbuff, sn->value[i], res); } } void bigtostring(big* x, stringnumber* sn, big* ten, big* xbuff, big* divbuff, big* modbuff, uint* minbuff) { copybig(x,xbuff); short snlength = 0; while (xbuff->size != 0 ) { divandmod(xbuff,ten,divbuff,modbuff,minbuff); sn->value[snlength] = (char) modbuff->value[0]; snlength++; copybig(divbuff,xbuff); } sn->size = snlength; } void printsn(stringnumber* sn){ for (int i = 0; i < sn->size; ++i){ printf("%d", sn->value[sn->size-i-1]); } printf("\n"); } void teskonversi(){ int seed = time(NULL); srand(seed); stringnumber *sn = (stringnumber*) malloc(sizeof(stringnumber)); sn->size = 25; sn->value = (char *) malloc(sn->size); for (int i = 0; i < sn->size; i++) { sn->value[i] = rand() % 10; } big* konversi = (big*) malloc(sizeof(big)); big* mulbuff = (big*) malloc(sizeof(big)); big* ten = (big*) malloc(sizeof(big)); konversi->value = (uint*) malloc(sizeof(10)); mulbuff->value = (uint*) malloc(sizeof(10)); ten->value = (uint*) malloc(sizeof(1)); ten->size = 1; ten->value[0] = 10; printf("Stringnumber awal : "); printsn(sn); stringtobig(sn, konversi, mulbuff, ten); printf("konversi size %d\n", konversi->size); printf("konversi value 0 %u\n", konversi->value[0]); printf("konversi value 0 %u\n", konversi->value[1]); stringnumber *sn2 = (stringnumber*) malloc(sizeof(stringnumber)); big* xbuff = (big*) malloc(sizeof(big)); big* divbuff = (big*) 
malloc(sizeof(big)); big* modbuff = (big*) malloc(sizeof(big)); sn2->value = (char *) malloc(100); xbuff->value = (uint *) malloc(sizeof(uint) * 10); divbuff->value = (uint *) malloc(sizeof(uint) * 10); modbuff->value = (uint *) malloc(sizeof(uint) * 10); uint* minbuff = (uint*) malloc(sizeof(uint) * 10); bigtostring(konversi,sn2,ten,xbuff,divbuff,modbuff,minbuff); printf("Stringnumber akhir : "); printsn(sn2); }
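The pair above contains no CUDA runtime API calls and no `<<<...>>>` launches, so hipify's only changes are the banner comment and the runtime header. A minimal sketch of that header-level mapping, assuming compilation with either nvcc or hipcc (the `add_u32` function and the `__HIPCC__` guard are illustrative, not taken from the row):

```cpp
// Illustrative only: one translation unit built as CUDA (nvcc) or HIP (hipcc).
// __HIPCC__ is defined by the HIP compiler.
#ifdef __HIPCC__
#include <hip/hip_runtime.h>   // header hipify substitutes in the .hip file
#else
#include <cuda_runtime.h>      // header the original .cu file includes
#endif

// __host__ __device__ qualifiers, used throughout the pair, are shared syntax.
__host__ __device__ unsigned int add_u32(unsigned int a, unsigned int b) {
  return a + b;
}

int main() { return add_u32(2, 3) == 5 ? 0 : 1; }
```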
a6d0e0e4967a08ba998585310ea8ba0a5d3bbe0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #define uint unsigned int #ifdef __INTELLISENSE__ void __syncthreads(); #endif // __INTELLISENSE__ void InitCPUData(double** matrices, int size); void FillHostMatrix(double** matrices, int size); hipError_t InitGPUData(double** matrices, int **dSize, int size, int **dStride, int stride); hipError_t CudaMemcpyMatrix(double** matrices, int size, hipMemcpyKind flag); void ForwardElimination(double* matrix, int size); void BackwardSubstitute(double* matrix, int size); __global__ void ForwardEliminationColumn(double* matrix, int* size, int* row, int* stride, int* pivotRow); int main() { dim3 grid_dim = dim3(1, 1, 1); dim3 block_dim = dim3(1024, 1, 1); double** matrices = (double**)malloc(4 * sizeof(double*)); //0 CPU, 1 HGPU, 2 DGPU, 3 Backup int size = 64; //Number of Rows / Columns, number of elements = size ^ 2 + size int *dSize = 0; for (size; size < 2049; size *= 2) //64, 128, 256, 512, 1024, 2048. { int failed = 0; FILE *csv; csv = fopen("DV2575Ass2Times.csv", "a"); fprintf(csv, "\nCPU,1,2,4,8\n"); fclose(csv); double GPUTimes[50] = { 0.f }; double CPUTimes[10] = { 0.f }; for (int rep = 0; rep < 10; ++rep) { //Init matrices and variable storage InitCPUData(matrices, size); timespec before; timespec after; timespec_get(&before, TIME_UTC); ForwardElimination(matrices[0], size); timespec_get(&after, TIME_UTC); double timeTakenSec = after.tv_sec - before.tv_sec; long long timeTakenNsec = after.tv_nsec - before.tv_nsec; long long timeTakenMsec = round(timeTakenNsec / 1000000.f); timeTakenSec += (double)timeTakenMsec / 1000.f; CPUTimes[rep] = timeTakenSec; BackwardSubstitute(matrices[0], size); for (int stride = 1; stride < 9; stride *= 2) //1, 2, 4, 8 { int *dStride = 0; int totalStride = stride * (size / (grid_dim.x * block_dim.x * stride/*Total number of threads, multiplied by the stride*/) + 1); //KERNEL CALL 1, Forward elimination int* dRow = 0; int* dPivotRow = 0; timespec_get(&before, TIME_UTC); hipMalloc((void**)&dRow, sizeof(int)); hipMalloc((void**)&dPivotRow, sizeof(int)); hipError_t cudaStatus = InitGPUData(matrices, &dSize, size, &dStride, totalStride); if (cudaStatus != hipSuccess) { goto Error; } for (int i = 0; i < size; ++i) { for (int j = i + 1; j < (size + 1); ++j) { hipMemcpy(dPivotRow, &i, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(dRow, &j, sizeof(int), hipMemcpyHostToDevice); ForwardEliminationColumn << <grid_dim, block_dim >> >(matrices[2], dSize, dRow, dStride, dPivotRow); } } CudaMemcpyMatrix(matrices, size, hipMemcpyDeviceToHost); timespec_get(&after, TIME_UTC); double timeTakenSec = after.tv_sec - before.tv_sec; long long timeTakenNsec = after.tv_nsec - before.tv_nsec; long long timeTakenMsec = round(timeTakenNsec / 1000000.f); timeTakenSec += (double)timeTakenMsec / 1000.f; int timeArrayPos = (stride / 2) * 10 + rep; GPUTimes[timeArrayPos] = timeTakenSec; BackwardSubstitute(matrices[1], size); for (int i = 0; i < size; ++i) { for (int j = 0; j < (size + 1); ++j) { if (matrices[1][i * (size + 1) + j] != matrices[0][i * (size + 1) + j]) { failed = i * (size + 1) + j; break; } } if (failed) break; } if (failed) { printf("Bad result\n"); printf("CPU:%f\t\t-\tGPU:%f\n", matrices[0][failed], matrices[1][failed]); } Error: hipFree(matrices[2]); hipFree(dSize); hipFree(dRow); hipFree(dStride); 
FillHostMatrix(matrices, size); } } free(matrices[0]); free(matrices[1]); /*if (!failed) { printf("Writing size %d to DV2575Ass2Times.csv\n", size); csv = fopen("DV2575Ass2Times.csv", "a"); for (int j = 0; j < 10; ++j) { fprintf(csv, "%f,", CPUTimes[j]); for (int i = 0; i < 5; ++i) { if (i == 3) { ++i; } fprintf(csv, "%f,", GPUTimes[i * 10 + j]); } fprintf(csv, "\n"); } fclose(csv); }*/ } free(matrices); system("PAUSE"); return 0; } void InitCPUData(double** matrices, int size) { srand((uint)time(NULL)); //malloc number of rows matrices[0] = (double*)malloc(size * (size + 1) * sizeof(double*)); matrices[1] = (double*)malloc(size * (size + 1) * sizeof(double*)); matrices[3] = (double*)malloc(size * (size + 1) * sizeof(double*)); double *s = (double*)malloc(size * sizeof(double)); for (int i = 0; i < size; ++i) { //fill row for (int j = 0; j < size; ++j) { matrices[0][i * (size + 1) + j] = matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j] = (double)(rand() % 10 + 1); //not allowing zeros b/c easier } s[i] = (double)(rand() % 10 + 1); matrices[0][i * (size + 1) + j] = matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j] = 1; } //Filling last column like this to ensure the system is solvable for (int i = 0; i < size; ++i) { for(int j = 0; j < size; ++j) { matrices[0][i * (size + 1) + size] = matrices[1][i * (size + 1) + size] = matrices[3][i * (size + 1) + size] += (s[j] * matrices[0][j]); } } } void FillHostMatrix(double** matrices, int size) { for (int i = 0; i < size; ++i) { for (int j = 0; j < (size + 1); ++j) { matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j]; } } } hipError_t InitGPUData(double** matrices, int **dSize, int size, int **dStride, int stride) { hipError_t cudaStatus; cudaStatus = hipMalloc((void**)&matrices[2], size * (size + 1) * sizeof(double*)); if (cudaStatus != hipSuccess) { printf("\nCould not allocate device memory for matrix\n"); return cudaStatus; } cudaStatus = CudaMemcpyMatrix(matrices, size, hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { return cudaStatus; } cudaStatus = hipMalloc((void**)dSize, sizeof(int)); //double void pointer super imoprtant if (cudaStatus != hipSuccess) { printf("\nCould not allocate device memory for matrix size\n"); return cudaStatus; } cudaStatus = hipMemcpy((void*)*dSize, &size, sizeof(int), hipMemcpyHostToDevice); //maybe move this to TransferGPUData? if (cudaStatus != hipSuccess) { printf("\nCould not copy size variable from host to device\n"); return cudaStatus; } cudaStatus = hipMalloc((void**)dStride, sizeof(int)); if (cudaStatus != hipSuccess) { printf("\nCould not allocate device memory for thread stride\n"); return cudaStatus; } cudaStatus = hipMemcpy((void*)*dStride, &stride, sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { printf("\nCould not copy stride variable from host to device\n"); return cudaStatus; } return cudaStatus; } hipError_t CudaMemcpyMatrix(double** matrices, int size, hipMemcpyKind flag) { hipError_t cudaStatus; int to = (flag == 1) + 1, from = (flag == 2) + 1; cudaStatus = hipMemcpy(matrices[to], matrices[from], size * (size + 1) * sizeof(double), flag); if (cudaStatus != hipSuccess) { printf("\nCould not copy matrix from "); flag == 1 ? 
printf("host to device\n") : printf("device to host\n"); } return cudaStatus; } void ForwardElimination(double* matrix, int size) { for (int i = 1; i < size; ++i) { for (int j = i; j < size; ++j) { //Calculate ratio between rows, so one can be reduced to 0 double ratio = matrix[j * (size + 1) + i - 1] / matrix[(i - 1) * (size + 1) + (i - 1)]; for (int k = 0; k < (size + 1); ++k) { matrix[j * (size + 1) + k] -= (ratio * matrix[(i - 1) * (size + 1) + k]); } } } } void BackwardSubstitute(double* matrix, int size) { for (int i = (size - 1); i > 0; --i) { matrix[i * (size + 1) + size] = matrix[i * (size + 1) + size] / matrix[i * (size + 1) + i]; for (int j = i - 1; j > -1; --j) { //Subtract from the rightmost element matrix[j * (size + 1) + size] -= matrix[j * (size + 1) + i] * matrix[i * (size + 1) + size]; //Eliminate element above matrix[j * (size + 1) + i] = 0; } matrix[i * (size + 1) + i] = 1.f; } matrix[size] = matrix[size] / matrix[0]; matrix[0] = 1.f; } __global__ void ForwardEliminationColumn(double* matrix, int* size, int* row, int* stride, int* pivotRow) { int _size = *size; int _row = *row; int _stride = *stride; int _pivotRow = *pivotRow; int startColumn = (blockIdx.x * blockDim.x + threadIdx.x) * _stride; double pivot = (double)matrix[_pivotRow * (_size + 1) + _pivotRow]; double belowPivot = (double)matrix[_row * (_size + 1) + _pivotRow]; double ratio = belowPivot / pivot; for (int i = 0; i < _stride; ++i) { if (startColumn + i < (_size + 1)) { matrix[_row * (_size + 1) + startColumn + i] -= (ratio * matrix[_pivotRow * (_size + 1) + startColumn + i]); __syncthreads(); } } }
a6d0e0e4967a08ba998585310ea8ba0a5d3bbe0b.cu
#include "cuda.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <math.h> #define uint unsigned int #ifdef __INTELLISENSE__ void __syncthreads(); #endif // __INTELLISENSE__ void InitCPUData(double** matrices, int size); void FillHostMatrix(double** matrices, int size); cudaError_t InitGPUData(double** matrices, int **dSize, int size, int **dStride, int stride); cudaError_t CudaMemcpyMatrix(double** matrices, int size, cudaMemcpyKind flag); void ForwardElimination(double* matrix, int size); void BackwardSubstitute(double* matrix, int size); __global__ void ForwardEliminationColumn(double* matrix, int* size, int* row, int* stride, int* pivotRow); int main() { dim3 grid_dim = dim3(1, 1, 1); dim3 block_dim = dim3(1024, 1, 1); double** matrices = (double**)malloc(4 * sizeof(double*)); //0 CPU, 1 HGPU, 2 DGPU, 3 Backup int size = 64; //Number of Rows / Columns, number of elements = size ^ 2 + size int *dSize = 0; for (size; size < 2049; size *= 2) //64, 128, 256, 512, 1024, 2048. { int failed = 0; FILE *csv; csv = fopen("DV2575Ass2Times.csv", "a"); fprintf(csv, "\nCPU,1,2,4,8\n"); fclose(csv); double GPUTimes[50] = { 0.f }; double CPUTimes[10] = { 0.f }; for (int rep = 0; rep < 10; ++rep) { //Init matrices and variable storage InitCPUData(matrices, size); timespec before; timespec after; timespec_get(&before, TIME_UTC); ForwardElimination(matrices[0], size); timespec_get(&after, TIME_UTC); double timeTakenSec = after.tv_sec - before.tv_sec; long long timeTakenNsec = after.tv_nsec - before.tv_nsec; long long timeTakenMsec = round(timeTakenNsec / 1000000.f); timeTakenSec += (double)timeTakenMsec / 1000.f; CPUTimes[rep] = timeTakenSec; BackwardSubstitute(matrices[0], size); for (int stride = 1; stride < 9; stride *= 2) //1, 2, 4, 8 { int *dStride = 0; int totalStride = stride * (size / (grid_dim.x * block_dim.x * stride/*Total number of threads, multiplied by the stride*/) + 1); //KERNEL CALL 1, Forward elimination int* dRow = 0; int* dPivotRow = 0; timespec_get(&before, TIME_UTC); cudaMalloc((void**)&dRow, sizeof(int)); cudaMalloc((void**)&dPivotRow, sizeof(int)); cudaError_t cudaStatus = InitGPUData(matrices, &dSize, size, &dStride, totalStride); if (cudaStatus != cudaSuccess) { goto Error; } for (int i = 0; i < size; ++i) { for (int j = i + 1; j < (size + 1); ++j) { cudaMemcpy(dPivotRow, &i, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dRow, &j, sizeof(int), cudaMemcpyHostToDevice); ForwardEliminationColumn << <grid_dim, block_dim >> >(matrices[2], dSize, dRow, dStride, dPivotRow); } } CudaMemcpyMatrix(matrices, size, cudaMemcpyDeviceToHost); timespec_get(&after, TIME_UTC); double timeTakenSec = after.tv_sec - before.tv_sec; long long timeTakenNsec = after.tv_nsec - before.tv_nsec; long long timeTakenMsec = round(timeTakenNsec / 1000000.f); timeTakenSec += (double)timeTakenMsec / 1000.f; int timeArrayPos = (stride / 2) * 10 + rep; GPUTimes[timeArrayPos] = timeTakenSec; BackwardSubstitute(matrices[1], size); for (int i = 0; i < size; ++i) { for (int j = 0; j < (size + 1); ++j) { if (matrices[1][i * (size + 1) + j] != matrices[0][i * (size + 1) + j]) { failed = i * (size + 1) + j; break; } } if (failed) break; } if (failed) { printf("Bad result\n"); printf("CPU:%f\t\t-\tGPU:%f\n", matrices[0][failed], matrices[1][failed]); } Error: cudaFree(matrices[2]); cudaFree(dSize); cudaFree(dRow); cudaFree(dStride); FillHostMatrix(matrices, size); } } free(matrices[0]); free(matrices[1]); /*if 
(!failed) { printf("Writing size %d to DV2575Ass2Times.csv\n", size); csv = fopen("DV2575Ass2Times.csv", "a"); for (int j = 0; j < 10; ++j) { fprintf(csv, "%f,", CPUTimes[j]); for (int i = 0; i < 5; ++i) { if (i == 3) { ++i; } fprintf(csv, "%f,", GPUTimes[i * 10 + j]); } fprintf(csv, "\n"); } fclose(csv); }*/ } free(matrices); system("PAUSE"); return 0; } void InitCPUData(double** matrices, int size) { srand((uint)time(NULL)); //malloc number of rows matrices[0] = (double*)malloc(size * (size + 1) * sizeof(double*)); matrices[1] = (double*)malloc(size * (size + 1) * sizeof(double*)); matrices[3] = (double*)malloc(size * (size + 1) * sizeof(double*)); double *s = (double*)malloc(size * sizeof(double)); for (int i = 0; i < size; ++i) { //fill row for (int j = 0; j < size; ++j) { matrices[0][i * (size + 1) + j] = matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j] = (double)(rand() % 10 + 1); //not allowing zeros b/c easier } s[i] = (double)(rand() % 10 + 1); matrices[0][i * (size + 1) + j] = matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j] = 1; } //Filling last column like this to ensure the system is solvable for (int i = 0; i < size; ++i) { for(int j = 0; j < size; ++j) { matrices[0][i * (size + 1) + size] = matrices[1][i * (size + 1) + size] = matrices[3][i * (size + 1) + size] += (s[j] * matrices[0][j]); } } } void FillHostMatrix(double** matrices, int size) { for (int i = 0; i < size; ++i) { for (int j = 0; j < (size + 1); ++j) { matrices[1][i * (size + 1) + j] = matrices[3][i * (size + 1) + j]; } } } cudaError_t InitGPUData(double** matrices, int **dSize, int size, int **dStride, int stride) { cudaError_t cudaStatus; cudaStatus = cudaMalloc((void**)&matrices[2], size * (size + 1) * sizeof(double*)); if (cudaStatus != cudaSuccess) { printf("\nCould not allocate device memory for matrix\n"); return cudaStatus; } cudaStatus = CudaMemcpyMatrix(matrices, size, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { return cudaStatus; } cudaStatus = cudaMalloc((void**)dSize, sizeof(int)); //double void pointer super imoprtant if (cudaStatus != cudaSuccess) { printf("\nCould not allocate device memory for matrix size\n"); return cudaStatus; } cudaStatus = cudaMemcpy((void*)*dSize, &size, sizeof(int), cudaMemcpyHostToDevice); //maybe move this to TransferGPUData? if (cudaStatus != cudaSuccess) { printf("\nCould not copy size variable from host to device\n"); return cudaStatus; } cudaStatus = cudaMalloc((void**)dStride, sizeof(int)); if (cudaStatus != cudaSuccess) { printf("\nCould not allocate device memory for thread stride\n"); return cudaStatus; } cudaStatus = cudaMemcpy((void*)*dStride, &stride, sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { printf("\nCould not copy stride variable from host to device\n"); return cudaStatus; } return cudaStatus; } cudaError_t CudaMemcpyMatrix(double** matrices, int size, cudaMemcpyKind flag) { cudaError_t cudaStatus; int to = (flag == 1) + 1, from = (flag == 2) + 1; cudaStatus = cudaMemcpy(matrices[to], matrices[from], size * (size + 1) * sizeof(double), flag); if (cudaStatus != cudaSuccess) { printf("\nCould not copy matrix from "); flag == 1 ? 
printf("host to device\n") : printf("device to host\n"); } return cudaStatus; } void ForwardElimination(double* matrix, int size) { for (int i = 1; i < size; ++i) { for (int j = i; j < size; ++j) { //Calculate ratio between rows, so one can be reduced to 0 double ratio = matrix[j * (size + 1) + i - 1] / matrix[(i - 1) * (size + 1) + (i - 1)]; for (int k = 0; k < (size + 1); ++k) { matrix[j * (size + 1) + k] -= (ratio * matrix[(i - 1) * (size + 1) + k]); } } } } void BackwardSubstitute(double* matrix, int size) { for (int i = (size - 1); i > 0; --i) { matrix[i * (size + 1) + size] = matrix[i * (size + 1) + size] / matrix[i * (size + 1) + i]; for (int j = i - 1; j > -1; --j) { //Subtract from the rightmost element matrix[j * (size + 1) + size] -= matrix[j * (size + 1) + i] * matrix[i * (size + 1) + size]; //Eliminate element above matrix[j * (size + 1) + i] = 0; } matrix[i * (size + 1) + i] = 1.f; } matrix[size] = matrix[size] / matrix[0]; matrix[0] = 1.f; } __global__ void ForwardEliminationColumn(double* matrix, int* size, int* row, int* stride, int* pivotRow) { int _size = *size; int _row = *row; int _stride = *stride; int _pivotRow = *pivotRow; int startColumn = (blockIdx.x * blockDim.x + threadIdx.x) * _stride; double pivot = (double)matrix[_pivotRow * (_size + 1) + _pivotRow]; double belowPivot = (double)matrix[_row * (_size + 1) + _pivotRow]; double ratio = belowPivot / pivot; for (int i = 0; i < _stride; ++i) { if (startColumn + i < (_size + 1)) { matrix[_row * (_size + 1) + startColumn + i] -= (ratio * matrix[_pivotRow * (_size + 1) + startColumn + i]); __syncthreads(); } } }
0b69a3befc0db19975f374b3f428a8413ffdd0d8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/gather.hpp> #include <groupby/sort/group_single_pass_reduction_util.cuh> #include <thrust/transform.h> namespace cudf { namespace groupby { namespace detail { std::unique_ptr<column> group_argmin(column_view const& values, size_type num_groups, rmm::device_vector<size_type> const& group_labels, column_view const& key_sort_order, rmm::mr::device_memory_resource* mr, hipStream_t stream) { auto indices = type_dispatcher(values.type(), reduce_functor<aggregation::ARGMIN>{}, values, num_groups, group_labels, rmm::mr::get_current_device_resource(), stream); // The functor returns the index of minimum in the sorted values. // We need the index of minimum in the original unsorted values. // So use indices to gather the sort order used to sort `values`. // Gather map cannot be null so we make a view with the mask removed. // The values in data buffer of indices corresponding to null values was // initialized to ARGMIN_SENTINEL which is an out of bounds index value (-1) // and causes the gathered value to be null. column_view null_removed_indices( data_type(type_to_id<size_type>()), indices->size(), static_cast<void const*>(indices->view().template data<size_type>())); auto result_table = cudf::detail::gather(table_view({key_sort_order}), null_removed_indices, indices->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE : cudf::detail::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream); return std::move(result_table->release()[0]); } } // namespace detail } // namespace groupby } // namespace cudf
0b69a3befc0db19975f374b3f428a8413ffdd0d8.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/gather.hpp> #include <groupby/sort/group_single_pass_reduction_util.cuh> #include <thrust/transform.h> namespace cudf { namespace groupby { namespace detail { std::unique_ptr<column> group_argmin(column_view const& values, size_type num_groups, rmm::device_vector<size_type> const& group_labels, column_view const& key_sort_order, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { auto indices = type_dispatcher(values.type(), reduce_functor<aggregation::ARGMIN>{}, values, num_groups, group_labels, rmm::mr::get_current_device_resource(), stream); // The functor returns the index of minimum in the sorted values. // We need the index of minimum in the original unsorted values. // So use indices to gather the sort order used to sort `values`. // Gather map cannot be null so we make a view with the mask removed. // The values in data buffer of indices corresponding to null values was // initialized to ARGMIN_SENTINEL which is an out of bounds index value (-1) // and causes the gathered value to be null. column_view null_removed_indices( data_type(type_to_id<size_type>()), indices->size(), static_cast<void const*>(indices->view().template data<size_type>())); auto result_table = cudf::detail::gather(table_view({key_sort_order}), null_removed_indices, indices->nullable() ? cudf::detail::out_of_bounds_policy::IGNORE : cudf::detail::out_of_bounds_policy::NULLIFY, cudf::detail::negative_index_policy::NOT_ALLOWED, mr, stream); return std::move(result_table->release()[0]); } } // namespace detail } // namespace groupby } // namespace cudf
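In this pair the only change besides the banner is a type in the function signature: `cudaStream_t` becomes `hipStream_t`. A minimal sketch of the corresponding stream API mapping (illustrative; the row itself renames only the type):

```cpp
#include <cuda_runtime.h>

int main() {
  cudaStream_t stream;            // hipStream_t in the hipified file
  cudaStreamCreate(&stream);      // hipStreamCreate
  // ... enqueue asynchronous work on `stream` ...
  cudaStreamSynchronize(stream);  // hipStreamSynchronize
  cudaStreamDestroy(stream);      // hipStreamDestroy
  return 0;
}
```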
5c4bcb517bd516c6519cc10626d5348036ba1118.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "linalg/sqrt.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveSqrtElemKernel(Type *out, const Type *in1, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = mySqrt(in1[idx]); } } template <typename Type> void naiveSqrtElem(Type *out, const Type *in1, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); hipLaunchKernelGGL(( naiveSqrtElemKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, out, in1, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct SqrtInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const SqrtInputs<T> &dims) { return os; } template <typename T> class SqrtTest : public ::testing::TestWithParam<SqrtInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SqrtInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in1, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(1.0), T(2.0)); naiveSqrtElem(out_ref, in1, len); sqrt(out, in1, len); sqrt(in1, in1, len); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: SqrtInputs<T> params; T *in1, *out_ref, *out; int device_count = 0; }; const std::vector<SqrtInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<SqrtInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef SqrtTest<float> SqrtTestF; TEST_P(SqrtTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<float>(params.tolerance))); } typedef SqrtTest<double> SqrtTestD; TEST_P(SqrtTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
5c4bcb517bd516c6519cc10626d5348036ba1118.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include "linalg/sqrt.h" #include "random/rng.h" #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naiveSqrtElemKernel(Type *out, const Type *in1, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = mySqrt(in1[idx]); } } template <typename Type> void naiveSqrtElem(Type *out, const Type *in1, int len) { static const int TPB = 64; int nblks = ceildiv(len, TPB); naiveSqrtElemKernel<Type><<<nblks, TPB>>>(out, in1, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct SqrtInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const SqrtInputs<T> &dims) { return os; } template <typename T> class SqrtTest : public ::testing::TestWithParam<SqrtInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<SqrtInputs<T>>::GetParam(); Random::Rng<T> r(params.seed); int len = params.len; allocate(in1, len); allocate(out_ref, len); allocate(out, len); r.uniform(in1, len, T(1.0), T(2.0)); naiveSqrtElem(out_ref, in1, len); sqrt(out, in1, len); sqrt(in1, in1, len); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: SqrtInputs<T> params; T *in1, *out_ref, *out; int device_count = 0; }; const std::vector<SqrtInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<SqrtInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef SqrtTest<float> SqrtTestF; TEST_P(SqrtTestF, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<float>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<float>(params.tolerance))); } typedef SqrtTest<double> SqrtTestD; TEST_P(SqrtTestD, Result) { ASSERT_TRUE(devArrMatch(out_ref, out, params.len, CompareApprox<double>(params.tolerance))); ASSERT_TRUE(devArrMatch(out_ref, in1, params.len, CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SqrtTests, SqrtTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
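This pair shows hipify's launch rewrite: `kernel<<<grid, block, shared, stream>>>(args...)` becomes `hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), shared, stream, args...)`, alongside `cudaPeekAtLastError` to `hipPeekAtLastError` and `cudaFree` to `hipFree`. A minimal sketch of the same rewrite (the `fill` kernel is illustrative, not from the row):

```cpp
#include <cuda_runtime.h>

__global__ void fill(float *out, float v, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = v;
}

int main() {
  const int n = 1024, tpb = 64;
  float *d = nullptr;
  cudaMalloc((void**)&d, n * sizeof(float));
  // CUDA form; the HIP side of such a pair would read
  // hipLaunchKernelGGL(fill, dim3((n + tpb - 1) / tpb), dim3(tpb), 0, 0, d, 1.0f, n);
  fill<<<(n + tpb - 1) / tpb, tpb>>>(d, 1.0f, n);
  cudaPeekAtLastError();   // hipPeekAtLastError
  cudaFree(d);             // hipFree
  return 0;
}
```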
1434d8419a56f876b3a7a9c2f219489b52cc59d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/dynamic_smem.hpp" namespace cv { namespace gpu { namespace cudev { namespace hough_lines { __device__ int g_counter; //////////////////////////////////////////////////////////////////////// // linesAccum __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; ::atomicAdd(accumRow + r + 1, 1); } } __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { int* smem = DynamicSharedMem<int>(); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; Emulation::smem::atomicAdd(&smem[r + 1], 1); } __syncthreads(); int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) accumRow[i] = smem[i]; } void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20) { const dim3 block(has20 ? 
1024 : 512); const dim3 grid(accum.rows - 2); size_t smemSize = (accum.cols - 1) * sizeof(int); if (smemSize < sharedMemPerBlock - 1000) hipLaunchKernelGGL(( linesAccumShared), dim3(grid), dim3(block), smemSize, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); else hipLaunchKernelGGL(( linesAccumGlobal), dim3(grid), dim3(block), 0, 0, list, count, accum, 1.0f / rho, theta, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // linesGetResult __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 || n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes > threshold && curVotes > accum(n + 1, r) && curVotes >= accum(n + 1, r + 2) && curVotes > accum(n, r + 1) && curVotes >= accum(n + 2, r + 1)) { const float radius = (r - (numrho - 1) * 0.5f) * rho; const float angle = n * theta; const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float2(radius, angle); votes[ind] = curVotes; } } } int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort) { void* counterPtr; cudaSafeCall( hipGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( hipMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( hipFuncSetCacheConfig(linesGetResult, hipFuncCachePreferL1) ); hipLaunchKernelGGL(( linesGetResult), dim3(grid), dim3(block), 0, 0, accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2); cudaSafeCall( hipGetLastError() ); cudaSafeCall( hipDeviceSynchronize() ); int totalCount; cudaSafeCall( hipMemcpy(&totalCount, counterPtr, sizeof(int), hipMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); if (doSort && totalCount > 0) { thrust::device_ptr<float2> outPtr(out); thrust::device_ptr<int> votesPtr(votes); thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>()); } return totalCount; } } }}} #endif /* CUDA_DISABLER */
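The two linesAccum kernels above vote into a (numangle + 2) x (numrho + 2) accumulator with a one-cell border, using r = round((x*cos(theta) + y*sin(theta)) / rho) shifted by (numrho - 1) / 2. A host-side reference of that indexing, handy for checking a device accumulator, could look roughly like this; houghAccumHost and the point-list type are illustrative names, not part of OpenCV.

#include <cmath>
#include <utility>
#include <vector>

// accum is laid out as (numangle + 2) rows x (numrho + 2) columns, matching
// the one-cell border the device code leaves around the voting area.
void houghAccumHost(const std::vector<std::pair<int, int>> &pts,
                    std::vector<int> &accum,
                    int numangle, int numrho, float rho, float theta) {
  accum.assign((numangle + 2) * (numrho + 2), 0);
  const float irho = 1.0f / rho;
  const int shift = (numrho - 1) / 2;
  for (int n = 0; n < numangle; ++n) {
    const float sinVal = std::sin(n * theta) * irho;
    const float cosVal = std::cos(n * theta) * irho;
    for (const auto &p : pts) {
      const int x = p.first, y = p.second;
      const int r = static_cast<int>(std::lround(x * cosVal + y * sinVal)) + shift;
      accum[(n + 1) * (numrho + 2) + (r + 1)] += 1;  // accum.ptr(n + 1)[r + 1] on the device
    }
  }
}

The border is what lets the later peak search compare each cell with its four neighbours without bounds checks.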
1434d8419a56f876b3a7a9c2f219489b52cc59d5.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include <thrust/device_ptr.h> #include <thrust/sort.h> #include "opencv2/core/cuda/common.hpp" #include "opencv2/core/cuda/emulation.hpp" #include "opencv2/core/cuda/dynamic_smem.hpp" namespace cv { namespace gpu { namespace cudev { namespace hough_lines { __device__ int g_counter; //////////////////////////////////////////////////////////////////////// // linesAccum __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; ::atomicAdd(accumRow + r + 1, 1); } } __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho) { int* smem = DynamicSharedMem<int>(); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) smem[i] = 0; __syncthreads(); const int n = blockIdx.x; const float ang = n * theta; float sinVal; float cosVal; sincosf(ang, &sinVal, &cosVal); sinVal *= irho; cosVal *= irho; const int shift = (numrho - 1) / 2; for (int i = threadIdx.x; i < count; i += blockDim.x) { const unsigned int val = list[i]; const int x = (val & 0xFFFF); const int y = (val >> 16) & 0xFFFF; int r = __float2int_rn(x * cosVal + y * sinVal); r += shift; Emulation::smem::atomicAdd(&smem[r + 1], 1); } __syncthreads(); int* accumRow = accum.ptr(n + 1); for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x) accumRow[i] = smem[i]; } void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20) { const dim3 block(has20 ? 
1024 : 512); const dim3 grid(accum.rows - 2); size_t smemSize = (accum.cols - 1) * sizeof(int); if (smemSize < sharedMemPerBlock - 1000) linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2); else linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); } //////////////////////////////////////////////////////////////////////// // linesGetResult __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho) { const int r = blockIdx.x * blockDim.x + threadIdx.x; const int n = blockIdx.y * blockDim.y + threadIdx.y; if (r >= accum.cols - 2 || n >= accum.rows - 2) return; const int curVotes = accum(n + 1, r + 1); if (curVotes > threshold && curVotes > accum(n + 1, r) && curVotes >= accum(n + 1, r + 2) && curVotes > accum(n, r + 1) && curVotes >= accum(n + 2, r + 1)) { const float radius = (r - (numrho - 1) * 0.5f) * rho; const float angle = n * theta; const int ind = ::atomicAdd(&g_counter, 1); if (ind < maxSize) { out[ind] = make_float2(radius, angle); votes[ind] = curVotes; } } } int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort) { void* counterPtr; cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) ); cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) ); const dim3 block(32, 8); const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y)); cudaSafeCall( cudaFuncSetCacheConfig(linesGetResult, cudaFuncCachePreferL1) ); linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2); cudaSafeCall( cudaGetLastError() ); cudaSafeCall( cudaDeviceSynchronize() ); int totalCount; cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) ); totalCount = ::min(totalCount, maxSize); if (doSort && totalCount > 0) { thrust::device_ptr<float2> outPtr(out); thrust::device_ptr<int> votesPtr(votes); thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>()); } return totalCount; } } }}} #endif /* CUDA_DISABLER */
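linesGetResult above keeps a rho-theta cell only if its vote count beats the threshold and is a local maximum against its four neighbours, with strict comparisons on two sides and non-strict on the other two so that a plateau of equal votes yields a single line. A host-side mirror of that test (sketch only; HoughLine and getLinesHost are made-up names) might read:

#include <vector>

struct HoughLine { float rho, theta; int votes; };

// accum uses the same (numangle + 2) x (numrho + 2) layout as the kernels above.
std::vector<HoughLine> getLinesHost(const std::vector<int> &accum,
                                    int numangle, int numrho,
                                    float rho, float theta, int threshold) {
  auto at = [&](int n, int r) { return accum[n * (numrho + 2) + r]; };
  std::vector<HoughLine> out;
  for (int n = 0; n < numangle; ++n)
    for (int r = 0; r < numrho; ++r) {
      const int cur = at(n + 1, r + 1);
      if (cur > threshold &&
          cur >  at(n + 1, r)     && cur >= at(n + 1, r + 2) &&
          cur >  at(n,     r + 1) && cur >= at(n + 2, r + 1)) {
        out.push_back({(r - (numrho - 1) * 0.5f) * rho, n * theta, cur});
      }
    }
  return out;
}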
fb4e019fe5bf9d60c7ad70ecf62cb63558f853c0.hip
// !!! This is a file automatically generated by hipify!!! #include "cudacommon.h" #include <cassert> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <iostream> #include "OptionParser.h" #include "ResultDatabase.h" #include "Spmv.h" #include "util.h" #include "conf.h" using namespace std; texture<float, 1> vecTex; // vector textures texture<int2, 1> vecTexD; // Texture Readers (used so kernels can be templated) struct texReaderSP { __device__ __forceinline__ float operator()(const int idx) const { return tex1Dfetch(vecTex, idx); } }; struct texReaderDP { __device__ __forceinline__ double operator()(const int idx) const { int2 v = tex1Dfetch(vecTexD, idx); #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return 0; #else return __hiloint2double(v.y, v.x); #endif } }; template <typename floatType> void memcpyHostToDevice(floatType *dst, floatType *src, int size ){ CUDA_SAFE_CALL(hipMemcpy(dst, src, size * sizeof(floatType),hipMemcpyHostToDevice)); }; template <typename floatType> void memcpyDeviceTexture(const void* devPtr, size_t size ){ if (sizeof(floatType) == sizeof(float)) { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTex, devPtr, channelDesc,size * sizeof(float))); }else { hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>(); CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, devPtr, channelDesc,size * sizeof(int2))); } }; template void memcpyHostToDevice<double>(double *dst, double *src, int size ); template void memcpyHostToDevice<int>(int *dst, int *src, int size ); template void memcpyDeviceTexture<double>(const void* devPtr, size_t size ); template void memcpyDeviceTexture<int>(const void* devPtr, size_t size ); // Forward declarations for kernels template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart); template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out); template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size); template <typename floatType> void csrTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){ int deviceStart = csrDevice->getStartPoint(); int *h_rowDelimiters = 
csrHost->getRowDelimiters()+deviceStart; int secStart = h_rowDelimiters[0] ; floatType *h_val = csrHost->getVal()+secStart; int *h_cols = csrHost->getCols()+secStart; floatType *h_vec = csrHost->getVec()+deviceStart; floatType *h_out = csrHost->getOut()+deviceStart; int numRows = csrDevice->getNumRows(); //int numNonZeroes = csrDevice->getNumNonZeroes(); int numNonZeroes = h_rowDelimiters[numRows]-secStart; //std::cout<<"secStart: "<<secStart<<std::endl; floatType *d_val = csrDevice->getVal(); int *d_cols = csrDevice->getCols(); int *d_rowDelimiters = csrDevice->getRowDelimiters(); floatType *d_vec = csrDevice->getVec(); floatType *d_out = csrDevice->getOut(); #ifdef CUDA_RECORD // Setup events for timing hipEvent_t start, stop; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(hipEventRecord(start, 0)); #endif CUDA_SAFE_CALL(hipMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), hipMemcpyHostToDevice)); #ifdef CUDA_RECORD CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(hipEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; #endif // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)){ suffix = "-SP"; }else { suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op->getOptionInt("passes"); int iters = op->getOptionInt("iterations"); #ifdef CUDA_RECORD // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows",numNonZeroes, numRows); string prefix = ""; double gflop = 2 * (double) numNonZeroes / 1e9; #endif #ifdef DARTS_DEBUG cout << "CSR Scalar Kernel\n"; #endif //cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl; //for (int k=0; k<passes; k++) //{ // Run Scalar Kernel #ifdef CUDA_RECORD CUDA_SAFE_CALL(hipEventRecord(start, 0)); #endif //for (int j = 0; j < iters; j++) //{ if(suffix == "-DP"){ hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderDP>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); }else{ hipLaunchKernelGGL(( spmv_csr_scalar_section_kernel<floatType, texReaderSP>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); } //} #ifdef CUDA_RECORD CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float scalarKernelTime; CUDA_SAFE_CALL(hipEventElapsedTime(&scalarKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(hipEventRecord(start, 0)); #endif CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType),hipMemcpyDeviceToHost)); #ifdef CUDA_RECORD CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); CUDA_SAFE_CALL(hipEventElapsedTime(&oTransferTime, start, stop)); #endif hipDeviceSynchronize(); #ifdef CUDA_RECORD oTransferTime *= 1.e-3; scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3; double totalTransfer = iTransferTime + oTransferTime; string startPoint = std::to_string(csrDevice->getStartPoint()); string testName = 
prefix+"CSR-Scalar"+suffix+"-startPoint-"+startPoint; resultDB->AddResult(testName, atts, "Gflop/s",gflop/(scalarKernelTime)); resultDB->AddResult(testName, atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer)); //resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer)); #endif //} } template <typename floatType> void csrTestVector(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){ int deviceStart = csrDevice->getStartPoint(); int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart; int secStart = h_rowDelimiters[0] ; floatType *h_val = csrHost->getVal()+secStart; int *h_cols = csrHost->getCols()+secStart; floatType *h_vec = csrHost->getVec()+deviceStart; floatType *h_out = csrHost->getOut()+deviceStart; int numRows = csrDevice->getNumRows(); int numNonZeroes = csrDevice->getNumNonZeroes(); //std::cout<<"secStart: "<<secStart<<std::endl; floatType *d_val = csrDevice->getVal(); int *d_cols = csrDevice->getCols(); int *d_rowDelimiters = csrDevice->getRowDelimiters(); floatType *d_vec = csrDevice->getVec(); floatType *d_out = csrDevice->getOut(); // Setup events for timing hipEvent_t start, stop; CUDA_SAFE_CALL(hipEventCreate(&start)); CUDA_SAFE_CALL(hipEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),hipMemcpyHostToDevice)); // CUDA_SAFE_CALL(hipMemcpy(d_vec, h_vec, numRows * sizeof(floatType),hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(hipEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)){ // hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); // CUDA_SAFE_CALL(hipBindTexture(0, vecTex, d_vec, channelDesc,numRows * sizeof(float))); suffix = "-SP"; } else { // hipChannelFormatDesc channelDesc = hipCreateChannelDesc<int2>(); // CUDA_SAFE_CALL(hipBindTexture(0, vecTexD, d_vec, channelDesc,numRows * sizeof(int2))); suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op->getOptionInt("passes"); int iters = op->getOptionInt("iterations"); // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows); string prefix = ""; double gflop = 2 * (double) numNonZeroes / 1e9; cout << "CSR vector Kernel\n"; //cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl; //for (int k=0; k<passes; k++) //{ // Run Scalar Kernel CUDA_SAFE_CALL(hipEventRecord(start, 0)); //for (int j = 0; j < iters; j++) //{ if(suffix == "-DP"){ hipLaunchKernelGGL(( spmv_csr_vector_section_kernel<floatType, texReaderDP>), dim3(nBlocksVector), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); }else{ hipLaunchKernelGGL(( spmv_csr_vector_section_kernel<floatType, texReaderSP>), dim3(nBlocksScalar), dim3(BLOCK_SIZE), 0, 0, d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); } //} CUDA_SAFE_CALL(hipEventRecord(stop, 0)); 
CUDA_SAFE_CALL(hipEventSynchronize(stop)); float vectorKernelTime; CUDA_SAFE_CALL(hipEventElapsedTime(&vectorKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(hipEventRecord(start, 0)); CUDA_SAFE_CALL(hipMemcpy(h_out, d_out, numRows * sizeof(floatType),hipMemcpyDeviceToHost)); CUDA_SAFE_CALL(hipEventRecord(stop, 0)); CUDA_SAFE_CALL(hipEventSynchronize(stop)); CUDA_SAFE_CALL(hipEventElapsedTime(&oTransferTime, start, stop)); hipDeviceSynchronize(); vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Vector"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB->AddResult(testName, atts, "Gflop/s",gflop/(vectorKernelTime)); resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (vectorKernelTime+totalTransfer)); //} } // **************************************************************************** // Function: spmv_csr_scalar_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a thread per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]; int end = rowDelimiters[myRow+1]; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); #ifdef DARTS_DEBUG if(threadIdx.x <20&&blockIdx.x ==0){ printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col)); } #endif } out[myRow] = t; } } template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]-secStart; int end = rowDelimiters[myRow+1]-secStart; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); #ifdef DARTS_DEBUG // if(threadIdx.x <20&&blockIdx.x ==0){ // printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col)); // } #endif } out[myRow] = t; } } // **************************************************************************** // Function: spmv_csr_vector_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a warp per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of 
column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } template <typename fpType, typename texReader> __global__ void spmv_csr_vector_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]-secStart; int warpEnd = rowDelimiters[myRow+1]-secStart; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } // **************************************************************************** // Function: spmv_ellpackr_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the ELLPACK-R data storage format; based on Vazquez et al (Univ. 
of // Almeria Tech Report 2009) // // Arguments: // val: array holding the non-zero values for the matrix in column // major format and padded with zeros up to the length of longest row // cols: array of column indices for each element of the sparse matrix // rowLengths: array storing the length of each row of the sparse matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing directly // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 29, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out) { int t = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (t < dim) { fpType result = 0.0f; int max = rowLengths[t]; for (int i = 0; i < max; i++) { int ind = i*dim+t; result += val[ind] * vecTexReader(cols[ind]); } out[t] = result; } } template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size) { int t = blockIdx.x * blockDim.x + threadIdx.x; if (t < size) a[t] = 0; } template void csrTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice ); template void csrTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice ); template void csrTestVector<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice ); template void csrTestVector<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
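The *_section_kernel variants above subtract secStart = rowDelimiters[0] from every row delimiter because d_val and d_cols hold only the slice of nonzeros assigned to this device, while rowDelimiters keeps its global offsets. A host-side reference of the same sectioned CSR SpMV could be the following sketch; csrSectionSpmvHost is an illustrative name, and the dense vector is assumed to be the full, globally indexed one, as in the texture reads.

#include <vector>

// rowDelim holds numRows + 1 global offsets; val/cols hold only the slice of
// nonzeros starting at secStart = rowDelim[0]; vec is the full dense vector.
void csrSectionSpmvHost(const std::vector<double> &val,
                        const std::vector<int> &cols,
                        const std::vector<int> &rowDelim,
                        const std::vector<double> &vec,
                        std::vector<double> &out) {
  const int secStart = rowDelim.front();
  const int numRows = static_cast<int>(rowDelim.size()) - 1;
  out.assign(numRows, 0.0);
  for (int row = 0; row < numRows; ++row) {
    double t = 0.0;
    for (int j = rowDelim[row] - secStart; j < rowDelim[row + 1] - secStart; ++j)
      t += val[j] * vec[cols[j]];  // cols are global column indices
    out[row] = t;
  }
}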
fb4e019fe5bf9d60c7ad70ecf62cb63558f853c0.cu
#include "cudacommon.h" #include <cassert> #include <cuda.h> #include <cuda_runtime_api.h> #include <iostream> #include "OptionParser.h" #include "ResultDatabase.h" #include "Spmv.h" #include "util.h" #include "conf.h" using namespace std; texture<float, 1> vecTex; // vector textures texture<int2, 1> vecTexD; // Texture Readers (used so kernels can be templated) struct texReaderSP { __device__ __forceinline__ float operator()(const int idx) const { return tex1Dfetch(vecTex, idx); } }; struct texReaderDP { __device__ __forceinline__ double operator()(const int idx) const { int2 v = tex1Dfetch(vecTexD, idx); #if (__CUDA_ARCH__ < 130) // Devices before arch 130 don't support DP, and having the // __hiloint2double() intrinsic will cause compilation to fail. // This return statement added as a workaround -- it will compile, // but since the arch doesn't support DP, it will never be called return 0; #else return __hiloint2double(v.y, v.x); #endif } }; template <typename floatType> void memcpyHostToDevice(floatType *dst, floatType *src, int size ){ CUDA_SAFE_CALL(cudaMemcpy(dst, src, size * sizeof(floatType),cudaMemcpyHostToDevice)); }; template <typename floatType> void memcpyDeviceTexture(const void* devPtr, size_t size ){ if (sizeof(floatType) == sizeof(float)) { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, devPtr, channelDesc,size * sizeof(float))); }else { cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>(); CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, devPtr, channelDesc,size * sizeof(int2))); } }; template void memcpyHostToDevice<double>(double *dst, double *src, int size ); template void memcpyHostToDevice<int>(int *dst, int *src, int size ); template void memcpyDeviceTexture<double>(const void* devPtr, size_t size ); template void memcpyDeviceTexture<int>(const void* devPtr, size_t size ); // Forward declarations for kernels template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out); template <typename fpType, typename texReader> __global__ void spmv_csr_vector_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart); template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out); template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size); template <typename floatType> void csrTestScalar(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){ int deviceStart = csrDevice->getStartPoint(); int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart; int secStart = h_rowDelimiters[0] ; floatType 
*h_val = csrHost->getVal()+secStart; int *h_cols = csrHost->getCols()+secStart; floatType *h_vec = csrHost->getVec()+deviceStart; floatType *h_out = csrHost->getOut()+deviceStart; int numRows = csrDevice->getNumRows(); //int numNonZeroes = csrDevice->getNumNonZeroes(); int numNonZeroes = h_rowDelimiters[numRows]-secStart; //std::cout<<"secStart: "<<secStart<<std::endl; floatType *d_val = csrDevice->getVal(); int *d_cols = csrDevice->getCols(); int *d_rowDelimiters = csrDevice->getRowDelimiters(); floatType *d_vec = csrDevice->getVec(); floatType *d_out = csrDevice->getOut(); #ifdef CUDA_RECORD // Setup events for timing cudaEvent_t start, stop; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(cudaEventRecord(start, 0)); #endif CUDA_SAFE_CALL(cudaMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), cudaMemcpyHostToDevice)); #ifdef CUDA_RECORD CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; #endif // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)){ suffix = "-SP"; }else { suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op->getOptionInt("passes"); int iters = op->getOptionInt("iterations"); #ifdef CUDA_RECORD // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows",numNonZeroes, numRows); string prefix = ""; double gflop = 2 * (double) numNonZeroes / 1e9; #endif #ifdef DARTS_DEBUG cout << "CSR Scalar Kernel\n"; #endif //cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl; //for (int k=0; k<passes; k++) //{ // Run Scalar Kernel #ifdef CUDA_RECORD CUDA_SAFE_CALL(cudaEventRecord(start, 0)); #endif //for (int j = 0; j < iters; j++) //{ if(suffix == "-DP"){ spmv_csr_scalar_section_kernel<floatType, texReaderDP><<<nBlocksScalar, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); }else{ spmv_csr_scalar_section_kernel<floatType, texReaderSP><<<nBlocksScalar, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); } //} #ifdef CUDA_RECORD CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float scalarKernelTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&scalarKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(cudaEventRecord(start, 0)); #endif CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType),cudaMemcpyDeviceToHost)); #ifdef CUDA_RECORD CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); CUDA_SAFE_CALL(cudaEventElapsedTime(&oTransferTime, start, stop)); #endif cudaThreadSynchronize(); #ifdef CUDA_RECORD oTransferTime *= 1.e-3; scalarKernelTime = (scalarKernelTime / (float)iters) * 1.e-3; double totalTransfer = iTransferTime + oTransferTime; string startPoint = std::to_string(csrDevice->getStartPoint()); string testName = prefix+"CSR-Scalar"+suffix+"-startPoint-"+startPoint; resultDB->AddResult(testName, atts, "Gflop/s",gflop/(scalarKernelTime)); resultDB->AddResult(testName, atts, 
"Gflop/s",gflop / (scalarKernelTime+totalTransfer)); //resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (scalarKernelTime+totalTransfer)); #endif //} } template <typename floatType> void csrTestVector(ResultDatabase* resultDB, OptionParser* op, CSRMM<floatType> *csrHost, CSRMM<floatType> *csrDevice ){ int deviceStart = csrDevice->getStartPoint(); int *h_rowDelimiters = csrHost->getRowDelimiters()+deviceStart; int secStart = h_rowDelimiters[0] ; floatType *h_val = csrHost->getVal()+secStart; int *h_cols = csrHost->getCols()+secStart; floatType *h_vec = csrHost->getVec()+deviceStart; floatType *h_out = csrHost->getOut()+deviceStart; int numRows = csrDevice->getNumRows(); int numNonZeroes = csrDevice->getNumNonZeroes(); //std::cout<<"secStart: "<<secStart<<std::endl; floatType *d_val = csrDevice->getVal(); int *d_cols = csrDevice->getCols(); int *d_rowDelimiters = csrDevice->getRowDelimiters(); floatType *d_vec = csrDevice->getVec(); floatType *d_out = csrDevice->getOut(); // Setup events for timing cudaEvent_t start, stop; CUDA_SAFE_CALL(cudaEventCreate(&start)); CUDA_SAFE_CALL(cudaEventCreate(&stop)); // Transfer data to device CUDA_SAFE_CALL(cudaEventRecord(start, 0)); CUDA_SAFE_CALL(cudaMemcpy(d_val, h_val, numNonZeroes * sizeof(floatType),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_cols, h_cols, numNonZeroes * sizeof(int),cudaMemcpyHostToDevice)); // CUDA_SAFE_CALL(cudaMemcpy(d_vec, h_vec, numRows * sizeof(floatType),cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rowDelimiters, h_rowDelimiters,(numRows+1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float iTransferTime, oTransferTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&iTransferTime, start, stop)); iTransferTime *= 1.e-3; // Bind texture for position string suffix; if (sizeof(floatType) == sizeof(float)){ // cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); // CUDA_SAFE_CALL(cudaBindTexture(0, vecTex, d_vec, channelDesc,numRows * sizeof(float))); suffix = "-SP"; } else { // cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<int2>(); // CUDA_SAFE_CALL(cudaBindTexture(0, vecTexD, d_vec, channelDesc,numRows * sizeof(int2))); suffix = "-DP"; } // Setup thread configuration int nBlocksScalar = (int) ceil((floatType) numRows / BLOCK_SIZE); int nBlocksVector = (int) ceil(numRows /(floatType)(BLOCK_SIZE / WARP_SIZE)); int passes = op->getOptionInt("passes"); int iters = op->getOptionInt("iterations"); // Results description info char atts[TEMP_BUFFER_SIZE]; sprintf(atts, "%d_elements_%d_rows", numNonZeroes, numRows); string prefix = ""; double gflop = 2 * (double) numNonZeroes / 1e9; cout << "CSR vector Kernel\n"; //cout<<"passes is : " <<passes<<", iters is "<< iters<<std::endl; //for (int k=0; k<passes; k++) //{ // Run Scalar Kernel CUDA_SAFE_CALL(cudaEventRecord(start, 0)); //for (int j = 0; j < iters; j++) //{ if(suffix == "-DP"){ spmv_csr_vector_section_kernel<floatType, texReaderDP><<<nBlocksVector, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); }else{ spmv_csr_vector_section_kernel<floatType, texReaderSP><<<nBlocksScalar, BLOCK_SIZE>>> (d_val, d_cols, d_rowDelimiters, numRows, d_out,secStart); } //} CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); float vectorKernelTime; CUDA_SAFE_CALL(cudaEventElapsedTime(&vectorKernelTime, start, stop)); // Transfer data back to host CUDA_SAFE_CALL(cudaEventRecord(start, 0)); 
CUDA_SAFE_CALL(cudaMemcpy(h_out, d_out, numRows * sizeof(floatType),cudaMemcpyDeviceToHost)); CUDA_SAFE_CALL(cudaEventRecord(stop, 0)); CUDA_SAFE_CALL(cudaEventSynchronize(stop)); CUDA_SAFE_CALL(cudaEventElapsedTime(&oTransferTime, start, stop)); cudaThreadSynchronize(); vectorKernelTime = (vectorKernelTime / (float)iters) * 1.e-3; string testName = prefix+"CSR-Vector"+suffix; double totalTransfer = iTransferTime + oTransferTime; resultDB->AddResult(testName, atts, "Gflop/s",gflop/(vectorKernelTime)); resultDB->AddResult(testName+"_PCIe", atts, "Gflop/s",gflop / (vectorKernelTime+totalTransfer)); //} } // **************************************************************************** // Function: spmv_csr_scalar_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a thread per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]; int end = rowDelimiters[myRow+1]; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); #ifdef DARTS_DEBUG if(threadIdx.x <20&&blockIdx.x ==0){ printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col)); } #endif } out[myRow] = t; } } template <typename fpType, typename texReader> __global__ void spmv_csr_scalar_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart) { int myRow = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (myRow < dim) { fpType t = 0.0f; int start = rowDelimiters[myRow]-secStart; int end = rowDelimiters[myRow+1]-secStart; for (int j = start; j < end; j++) { int col = cols[j]; t += val[j] * vecTexReader(col); #ifdef DARTS_DEBUG // if(threadIdx.x <20&&blockIdx.x ==0){ // printf("val[%d]=%lf, vecTexReader(%d)=%lf\n",j,val[j],col,vecTexReader(col)); // } #endif } out[myRow] = t; } } // **************************************************************************** // Function: spmv_csr_vector_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the CSR data storage format, using a warp per row of the sparse // matrix; based on Bell (SC09) and Baskaran (IBM Tech Report) // // Arguments: // val: array holding the non-zero values for the matrix // cols: array of column indices for each element of the sparse matrix // rowDelimiters: array of size dim+1 holding indices to rows of the matrix // last element is the index one past the last // element of the matrix 
// dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 28, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_csr_vector_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]; int warpEnd = rowDelimiters[myRow+1]; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } template <typename fpType, typename texReader> __global__ void spmv_csr_vector_section_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowDelimiters, const int dim, fpType * __restrict__ out, const int secStart) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (warpSize-1); int warpsPerBlock = blockDim.x / warpSize; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / warpSize); // Texture reader for the dense vector texReader vecTexReader; __shared__ volatile fpType partialSums[BLOCK_SIZE]; if (myRow < dim) { int warpStart = rowDelimiters[myRow]-secStart; int warpEnd = rowDelimiters[myRow+1]-secStart; fpType mySum = 0; for (int j = warpStart + id; j < warpEnd; j += warpSize) { int col = cols[j]; mySum += val[j] * vecTexReader(col); } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } // **************************************************************************** // Function: spmv_ellpackr_kernel // // Purpose: // Computes sparse matrix - vector multiplication on the GPU using // the ELLPACK-R data storage format; based on Vazquez et al (Univ. 
of // Almeria Tech Report 2009) // // Arguments: // val: array holding the non-zero values for the matrix in column // major format and padded with zeros up to the length of longest row // cols: array of column indices for each element of the sparse matrix // rowLengths: array storing the length of each row of the sparse matrix // dim: number of rows in the matrix // out: output - result from the spmv calculation // // Returns: nothing directly // out indirectly through a pointer // // Programmer: Lukasz Wesolowski // Creation: June 29, 2010 // // Modifications: // // **************************************************************************** template <typename fpType, typename texReader> __global__ void spmv_ellpackr_kernel(const fpType * __restrict__ val, const int * __restrict__ cols, const int * __restrict__ rowLengths, const int dim, fpType * __restrict__ out) { int t = blockIdx.x * blockDim.x + threadIdx.x; texReader vecTexReader; if (t < dim) { fpType result = 0.0f; int max = rowLengths[t]; for (int i = 0; i < max; i++) { int ind = i*dim+t; result += val[ind] * vecTexReader(cols[ind]); } out[t] = result; } } template <typename fpType> __global__ void zero(fpType * __restrict__ a, const int size) { int t = blockIdx.x * blockDim.x + threadIdx.x; if (t < size) a[t] = 0; } template void csrTestScalar<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice ); template void csrTestScalar<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice ); template void csrTestVector<double>(ResultDatabase* resultDB, OptionParser* op, CSRMM<double> *csrHost, CSRMM<double> *csrDevice ); template void csrTestVector<int>(ResultDatabase* resultDB, OptionParser* op, CSRMM<int> *csrHost, CSRMM<int> *csrDevice );
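spmv_ellpackr_kernel above assumes the ELLPACK-R layout described in its header comment: values padded to the length of the longest row and stored column-major, indexed as ind = i*dim + t, with rowLengths giving each row's true length. The small self-contained host example below walks that layout for a made-up 3x3 matrix; all names and numbers here are illustrative, not taken from the benchmark.

#include <cstdio>
#include <vector>

int main() {
  const int dim = 3;                          // number of rows
  // matrix: [10 0 0; 0 20 30; 0 0 40] -> longest row has 2 nonzeros
  std::vector<double> val  = {10, 20, 40,     // slot 0 of rows 0,1,2
                               0, 30,  0};    // slot 1 (zeros are padding)
  std::vector<int>    cols = { 0,  1,  2,
                               0,  2,  0};
  std::vector<int> rowLengths = {1, 2, 1};
  std::vector<double> vec = {1, 2, 3}, out(dim, 0.0);

  for (int t = 0; t < dim; ++t)               // one "thread" per row
    for (int i = 0; i < rowLengths[t]; ++i) {
      const int ind = i * dim + t;            // column-major index, as in the kernel
      out[t] += val[ind] * vec[cols[ind]];
    }
  for (int t = 0; t < dim; ++t) std::printf("out[%d] = %g\n", t, out[t]);
  return 0;                                   // expected: 10, 130, 120
}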
8b148f2786662bdaeb477a44f6e0e1226e18ac2b.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines a math function */ #include <algorithm> #include <stdexcept> #include <iomanip> #include <cstring> #include <fstream> #include <sstream> #ifdef __unix__ #include <unistd.h> #elif defined(_WIN32) || defined(WIN32) #include <windows.h> #else // sleep not supported #endif #include "options.h" #include "operation_profiler.h" #include "gpu_timer.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { /////////////////////////////////////////////////////////////////////////////////////////////////// OperationProfiler::OperationProfiler(): kind_(library::OperationKind::kInvalid) { } /// Ctor OperationProfiler::OperationProfiler( Options const &options, library::OperationKind kind, ArgumentDescriptionVector const &arguments, ProviderVector const & verification_providers ): kind_(kind), arguments_(arguments) { ArgumentDescriptionVector tile_description_arguments{ {ArgumentTypeID::kEnumerated, {"op_class", "opcode-class"}, "Class of math instruction (simt, tensorop, wmmatensorop, wmma)"}, {ArgumentTypeID::kEnumerated, {"accum", "accumulator-type"}, "Math instruction accumulator data type"}, {ArgumentTypeID::kInteger, {"cta_m", "threadblock-shape::m"}, "Threadblock shape in the M dimension"}, {ArgumentTypeID::kInteger, {"cta_n", "threadblock-shape::n"}, "Threadblock shape in the N dimension"}, {ArgumentTypeID::kInteger, {"cta_k", "threadblock-shape::k"}, "Threadblock shape in the K dimension"}, {ArgumentTypeID::kInteger, {"stages", "threadblock-stages"}, "Number of stages of threadblock-scoped matrix multiply"}, {ArgumentTypeID::kInteger, {"warps_m", "warp-count::m"}, 
"Number of warps within threadblock along the M dimension"}, {ArgumentTypeID::kInteger, {"warps_n", "warp-count::n"}, "Number of warps within threadblock along the N dimension"}, {ArgumentTypeID::kInteger, {"warps_k", "warp-count::k"}, "Number of warps within threadblock along the K dimension"}, {ArgumentTypeID::kInteger, {"inst_m", "instruction-shape::m"}, "Math instruction shape in the M dimension"}, {ArgumentTypeID::kInteger, {"inst_n", "instruction-shape::n"}, "Math instruction shape in the N dimension"}, {ArgumentTypeID::kInteger, {"inst_k", "instruction-shape::k"}, "Math instruction shape in the K dimension"}, {ArgumentTypeID::kInteger, {"min_cc", "minimum-compute-capability"}, "Minimum device compute capability"}, {ArgumentTypeID::kInteger, {"max_cc", "maximum-compute-capability"}, "Maximum device compute capability"} }; arguments_.insert(arguments_.end(), tile_description_arguments.begin(), tile_description_arguments.end()); for (auto provider : verification_providers) { if (std::find( options.verification.providers.begin(), options.verification.providers.end(), provider) != options.verification.providers.end()) { verification_providers_.push_back(provider); } } } /// Destructor OperationProfiler::~OperationProfiler() { } /// Gets the schema description std::string const & OperationProfiler::description() const { return description_; } /// Prints usage statement for the math function void OperationProfiler::print_usage(std::ostream &out) const { for (auto const & desc : arguments_) { size_t const kAliasStart = 10; size_t columns = 0; std::string type_str = to_string(desc.type); columns += type_str.size(); out << " [" << type_str << "]"; if (columns < kAliasStart) { out << std::string(kAliasStart - columns, ' '); } columns = 0; int j = 0; for (auto const & alias : desc.aliases) { columns += alias.size() + (j ? 1 : 0) + 2; out << (j++ ? 
"," : "") << "--" << alias; } size_t const kTotalColumns = 50; if (columns < kTotalColumns) { out << std::string(kTotalColumns - columns, ' '); } out << desc.description << "\n"; } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if the current operation description satisfies the problem space bool OperationProfiler::satisfies( library::OperationDescription const &op_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::OpcodeClassID opcode_class; if (arg_as_OpcodeClassID(opcode_class, "op_class", problem_space, problem)) { if (opcode_class != op_desc.tile_description.math_instruction.opcode_class) { return false; } } int64_t int_value; if (arg_as_int(int_value, "inst_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "stages", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_stages) != int_value) { return false; } } if (arg_as_int(int_value, "warps_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.m()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.n()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.k()) != int_value) { return false; } } library::NumericTypeID numeric_type; if (arg_as_NumericTypeID(numeric_type, "accum", problem_space, problem)) { if (numeric_type != op_desc.tile_description.math_instruction.element_accumulator) { return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to profile all operations in the manifest int OperationProfiler::profile_all( Options const &options, library::Manifest const &manifest, DeviceContext &device_context) { ProblemSpace problem_space(arguments_, options.cmdline); // 1. Construct performance report PerformanceReport report(options, problem_space.argument_names(), kind_); // 2. 
For each problem in problem space ProblemSpace::Iterator problem_it = problem_space.begin(); ProblemSpace::Iterator problem_end = problem_space.end(); bool continue_profiling = true, internal_error = false; // For each problem in problem space for (; continue_profiling && problem_it != problem_end; ++problem_it) { ProblemSpace::Problem problem = problem_it.at(); report.next_problem(); // For each operation in manifest for (auto const & operation_ptr : manifest) { library::Operation const *operation = operation_ptr.get(); auto min_cc = operation->description().tile_description.minimum_compute_capability; auto max_cc = operation->description().tile_description.maximum_compute_capability; // Clear named allocations device_context.free(); // Execute compatible cutlass operations if they satisfy the current device's compute capability if (operation->description().kind == kind_ && operation->description().provider == library::Provider::kCUTLASS && options.device.compute_capability() >= min_cc && options.device.compute_capability() <= max_cc) { std::string operation_name(operation->description().name); // Filter kernels by name bool filtered_by_name = options.operation_names.empty(); if (!filtered_by_name) { for (auto const & op_name : options.operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = true; break; } } } for (auto const & op_name : options.excluded_operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = false; break; } } if (!filtered_by_name || !satisfies(operation->description(), problem_space, problem)) { continue; } // A. Initialize configuration Status status = this->initialize_configuration( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // If there was an internal error, consume the CUDA error and move to the next operation. (void)hipGetLastError(); report.append_results(results_); continue; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. continue; } if (continue_profiling) { status = this->initialize_workspace( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // If there was an internal error, consume the CUDA error and move to the next operation. (void)hipGetLastError(); report.append_results(results_); continue; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. continue; } } // // Profile CUTLASS if it is enabled // // B. Verify CUTLASS if (continue_profiling && options.profiling.provider_enabled(library::Provider::kCUTLASS)) { continue_profiling = this->verify_cutlass( options, report, device_context, operation, problem_space, problem); } if (options.execution_mode == ExecutionMode::kDryRun) { report.append_results(results_); results_.clear(); continue; } // // C. Optionally save workspace // if (options.verification.save_workspace == SaveWorkspace::kAlways) { save_workspace( device_context, options, operation->description(), library::Provider::kCUTLASS); } // // D. Profile // if (continue_profiling && options.profiling.enabled) { continue_profiling = this->profile( options, report, device_context, operation, problem_space, problem); } report.append_results(results_); results_.clear(); } if (!continue_profiling) { break; } } } return internal_error ? 
1 : 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sleep for a given duration in ms void OperationProfiler::sleep(int sleep_duration) { if (sleep_duration) { #ifdef __unix__ usleep(sleep_duration * 1000); #elif defined(_WIN32) || defined(WIN32) SleepEx(sleep_duration, false); #else // sleep not supported #endif } } /// Compares tensors for equality Disposition OperationProfiler::compare_tensors( Options const &options, DeviceAllocation &experimental, DeviceAllocation &reference, int64_t count) { if (experimental.type() != reference.type()) { return Disposition::kIncorrect; } bool passed = false; if (count == 0) { count = reference.capacity(); } if (options.verification.epsilon == 0) { // bit-level equality passed = DeviceAllocation::block_compare_equal( experimental.type(), experimental.data(), reference.data(), count); } else { // relative error function passed = DeviceAllocation::block_compare_relatively_equal( experimental.type(), experimental.data(), reference.data(), count, options.verification.epsilon, options.verification.nonzero_floor); } return passed ? Disposition::kPassed : Disposition::kIncorrect; } /// Saves the workspace void OperationProfiler::save_workspace( DeviceContext &device_context, Options const &options, library::OperationDescription const &desc, library::Provider provider, library::Provider verification_provider) { for (auto const & named_allocation : device_context) { DeviceAllocation *allocation = named_allocation.second; std::stringstream filename; filename << desc.name << "_" << library::to_string(provider) << "_"; if (verification_provider != library::Provider::kInvalid) { filename << "verified_by_" << library::to_string(verification_provider) << "_"; } filename << named_allocation.first + ".mat"; std::ofstream out(filename.str()); allocation->write_tensor_csv(out); out << "\n"; if (options.report.verbose) { std::cout << "wrote '" << filename.str() << "'" << std::endl; } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Method to profile a CUTLASS Operation Status OperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sets operation description void OperationProfiler::initialize_result_( PerformanceResult &result, library::OperationDescription const &operation_desc, ProblemSpace const &problem_space) { set_argument(result, "op_class", problem_space, 
library::to_string(operation_desc.tile_description.math_instruction.opcode_class)); set_argument(result, "accum", problem_space, library::to_string(operation_desc.tile_description.math_instruction.element_accumulator)); set_argument(result, "cta_m", problem_space, operation_desc.tile_description.threadblock_shape.m()); set_argument(result, "cta_n", problem_space, operation_desc.tile_description.threadblock_shape.n()); set_argument(result, "cta_k", problem_space, operation_desc.tile_description.threadblock_shape.k()); set_argument(result, "stages", problem_space, operation_desc.tile_description.threadblock_stages); set_argument(result, "warps_m", problem_space, operation_desc.tile_description.warp_count.m()); set_argument(result, "warps_n", problem_space, operation_desc.tile_description.warp_count.n()); set_argument(result, "warps_k", problem_space, operation_desc.tile_description.warp_count.k()); set_argument(result, "inst_m", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.m()); set_argument(result, "inst_n", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.n()); set_argument(result, "inst_k", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.k()); set_argument(result, "min_cc", problem_space, operation_desc.tile_description.minimum_compute_capability); set_argument(result, "max_cc", problem_space, operation_desc.tile_description.maximum_compute_capability); } /// Helper void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, std::string const &value) { result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), value); } void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, int64_t value) { result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), library::lexical_cast(value)); } /// finds string matches filter_string in operation_name bool OperationProfiler::find_string_matches_( std::string const &filter_string, std::string const &operation_name) { // Returns true if all substrings appear in the operation_name in order // Split filter_string of the format "gemm*f32*nt" to tokens ["gemm", "f32", "nt"] std::string item; std::istringstream iss(filter_string); std::vector<std::string> filter_tokens; while (std::getline(iss, item, '*')) { filter_tokens.push_back(item); } // Search filter_tokens in operation_name in order size_t start = 0, idx = 0; for(auto & token : filter_tokens) { // Check if characters left to be parsed in operation_name if (start < operation_name.length()) { // Find token in operation_name[start:] idx = operation_name.substr(start).find(token); if (idx == std::string::npos) { return false; } } start += (idx + token.length()); } // All tokens in filter_string found in operation_name return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
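// --- Illustrative sketch (not part of the original file above) ---
// The operation profiler filters kernels by name with a '*'-separated pattern:
// "gemm*f32*nt" accepts any operation name that contains "gemm", "f32" and "nt"
// in that order. Below is a minimal, simplified re-implementation of that token
// walk for reference only; the function name matches_in_order is hypothetical and
// the edge-case handling when the name is exhausted early may differ slightly
// from OperationProfiler::find_string_matches_.
#include <sstream>
#include <string>

static bool matches_in_order(const std::string &filter, const std::string &name) {
  std::istringstream iss(filter);
  std::string token;
  size_t start = 0;
  while (std::getline(iss, token, '*')) {          // split "gemm*f32*nt" into tokens
    size_t idx = name.find(token, start);          // search only past the previous match
    if (idx == std::string::npos) {
      return false;                                // token missing (or out of order)
    }
    start = idx + token.length();
  }
  return true;                                     // all tokens found, in order
}
// e.g. matches_in_order("gemm*f32*nt", "cutlass_simt_sgemm_f32_nt_align1") == true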
8b148f2786662bdaeb477a44f6e0e1226e18ac2b.cu
/*************************************************************************************************** * Copyright (c) 2017 - 2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /* \file \brief Defines a math function */ #include <algorithm> #include <stdexcept> #include <iomanip> #include <cstring> #include <fstream> #include <sstream> #ifdef __unix__ #include <unistd.h> #elif defined(_WIN32) || defined(WIN32) #include <windows.h> #else // sleep not supported #endif #include "options.h" #include "operation_profiler.h" #include "gpu_timer.h" /////////////////////////////////////////////////////////////////////////////////////////////////// namespace cutlass { namespace profiler { /////////////////////////////////////////////////////////////////////////////////////////////////// OperationProfiler::OperationProfiler(): kind_(library::OperationKind::kInvalid) { } /// Ctor OperationProfiler::OperationProfiler( Options const &options, library::OperationKind kind, ArgumentDescriptionVector const &arguments, ProviderVector const & verification_providers ): kind_(kind), arguments_(arguments) { ArgumentDescriptionVector tile_description_arguments{ {ArgumentTypeID::kEnumerated, {"op_class", "opcode-class"}, "Class of math instruction (simt, tensorop, wmmatensorop, wmma)"}, {ArgumentTypeID::kEnumerated, {"accum", "accumulator-type"}, "Math instruction accumulator data type"}, {ArgumentTypeID::kInteger, {"cta_m", "threadblock-shape::m"}, "Threadblock shape in the M dimension"}, {ArgumentTypeID::kInteger, {"cta_n", "threadblock-shape::n"}, "Threadblock shape in the N dimension"}, {ArgumentTypeID::kInteger, {"cta_k", "threadblock-shape::k"}, "Threadblock shape in the K dimension"}, {ArgumentTypeID::kInteger, {"stages", "threadblock-stages"}, "Number of stages of threadblock-scoped matrix multiply"}, {ArgumentTypeID::kInteger, {"warps_m", "warp-count::m"}, "Number of warps within threadblock along the M 
dimension"}, {ArgumentTypeID::kInteger, {"warps_n", "warp-count::n"}, "Number of warps within threadblock along the N dimension"}, {ArgumentTypeID::kInteger, {"warps_k", "warp-count::k"}, "Number of warps within threadblock along the K dimension"}, {ArgumentTypeID::kInteger, {"inst_m", "instruction-shape::m"}, "Math instruction shape in the M dimension"}, {ArgumentTypeID::kInteger, {"inst_n", "instruction-shape::n"}, "Math instruction shape in the N dimension"}, {ArgumentTypeID::kInteger, {"inst_k", "instruction-shape::k"}, "Math instruction shape in the K dimension"}, {ArgumentTypeID::kInteger, {"min_cc", "minimum-compute-capability"}, "Minimum device compute capability"}, {ArgumentTypeID::kInteger, {"max_cc", "maximum-compute-capability"}, "Maximum device compute capability"} }; arguments_.insert(arguments_.end(), tile_description_arguments.begin(), tile_description_arguments.end()); for (auto provider : verification_providers) { if (std::find( options.verification.providers.begin(), options.verification.providers.end(), provider) != options.verification.providers.end()) { verification_providers_.push_back(provider); } } } /// Destructor OperationProfiler::~OperationProfiler() { } /// Gets the schema description std::string const & OperationProfiler::description() const { return description_; } /// Prints usage statement for the math function void OperationProfiler::print_usage(std::ostream &out) const { for (auto const & desc : arguments_) { size_t const kAliasStart = 10; size_t columns = 0; std::string type_str = to_string(desc.type); columns += type_str.size(); out << " [" << type_str << "]"; if (columns < kAliasStart) { out << std::string(kAliasStart - columns, ' '); } columns = 0; int j = 0; for (auto const & alias : desc.aliases) { columns += alias.size() + (j ? 1 : 0) + 2; out << (j++ ? 
"," : "") << "--" << alias; } size_t const kTotalColumns = 50; if (columns < kTotalColumns) { out << std::string(kTotalColumns - columns, ' '); } out << desc.description << "\n"; } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Returns true if the current operation description satisfies the problem space bool OperationProfiler::satisfies( library::OperationDescription const &op_desc, ProblemSpace const &problem_space, ProblemSpace::Problem const &problem) { library::OpcodeClassID opcode_class; if (arg_as_OpcodeClassID(opcode_class, "op_class", problem_space, problem)) { if (opcode_class != op_desc.tile_description.math_instruction.opcode_class) { return false; } } int64_t int_value; if (arg_as_int(int_value, "inst_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "inst_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.math_instruction.instruction_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.m()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.n()) != int_value) { return false; } } if (arg_as_int(int_value, "cta_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_shape.k()) != int_value) { return false; } } if (arg_as_int(int_value, "stages", problem_space, problem)) { if (int64_t(op_desc.tile_description.threadblock_stages) != int_value) { return false; } } if (arg_as_int(int_value, "warps_m", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.m()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_n", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.n()) != int_value) { return false; } } if (arg_as_int(int_value, "warps_k", problem_space, problem)) { if (int64_t(op_desc.tile_description.warp_count.k()) != int_value) { return false; } } library::NumericTypeID numeric_type; if (arg_as_NumericTypeID(numeric_type, "accum", problem_space, problem)) { if (numeric_type != op_desc.tile_description.math_instruction.element_accumulator) { return false; } } return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point to profile all operations in the manifest int OperationProfiler::profile_all( Options const &options, library::Manifest const &manifest, DeviceContext &device_context) { ProblemSpace problem_space(arguments_, options.cmdline); // 1. Construct performance report PerformanceReport report(options, problem_space.argument_names(), kind_); // 2. 
For each problem in problem space ProblemSpace::Iterator problem_it = problem_space.begin(); ProblemSpace::Iterator problem_end = problem_space.end(); bool continue_profiling = true, internal_error = false; // For each problem in problem space for (; continue_profiling && problem_it != problem_end; ++problem_it) { ProblemSpace::Problem problem = problem_it.at(); report.next_problem(); // For each operation in manifest for (auto const & operation_ptr : manifest) { library::Operation const *operation = operation_ptr.get(); auto min_cc = operation->description().tile_description.minimum_compute_capability; auto max_cc = operation->description().tile_description.maximum_compute_capability; // Clear named allocations device_context.free(); // Execute compatible cutlass operations if they satisfy the current device's compute capability if (operation->description().kind == kind_ && operation->description().provider == library::Provider::kCUTLASS && options.device.compute_capability() >= min_cc && options.device.compute_capability() <= max_cc) { std::string operation_name(operation->description().name); // Filter kernels by name bool filtered_by_name = options.operation_names.empty(); if (!filtered_by_name) { for (auto const & op_name : options.operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = true; break; } } } for (auto const & op_name : options.excluded_operation_names) { if (find_string_matches_(op_name, operation_name)) { filtered_by_name = false; break; } } if (!filtered_by_name || !satisfies(operation->description(), problem_space, problem)) { continue; } // A. Initialize configuration Status status = this->initialize_configuration( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // If there was an internal error, consume the CUDA error and move to the next operation. (void)cudaGetLastError(); report.append_results(results_); continue; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. continue; } if (continue_profiling) { status = this->initialize_workspace( options, report, device_context, operation, problem_space, problem); if (status == Status::kErrorInternal) { // If there was an internal error, consume the CUDA error and move to the next operation. (void)cudaGetLastError(); report.append_results(results_); continue; } else if (status != Status::kSuccess) { // If the workspace could not be initialized for any other reason, continue to // the next operation. continue; } } // // Profile CUTLASS if it is enabled // // B. Verify CUTLASS if (continue_profiling && options.profiling.provider_enabled(library::Provider::kCUTLASS)) { continue_profiling = this->verify_cutlass( options, report, device_context, operation, problem_space, problem); } if (options.execution_mode == ExecutionMode::kDryRun) { report.append_results(results_); results_.clear(); continue; } // // C. Optionally save workspace // if (options.verification.save_workspace == SaveWorkspace::kAlways) { save_workspace( device_context, options, operation->description(), library::Provider::kCUTLASS); } // // D. Profile // if (continue_profiling && options.profiling.enabled) { continue_profiling = this->profile( options, report, device_context, operation, problem_space, problem); } report.append_results(results_); results_.clear(); } if (!continue_profiling) { break; } } } return internal_error ? 
1 : 0; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sleep for a given duration in ms void OperationProfiler::sleep(int sleep_duration) { if (sleep_duration) { #ifdef __unix__ usleep(sleep_duration * 1000); #elif defined(_WIN32) || defined(WIN32) SleepEx(sleep_duration, false); #else // sleep not supported #endif } } /// Compares tensors for equality Disposition OperationProfiler::compare_tensors( Options const &options, DeviceAllocation &experimental, DeviceAllocation &reference, int64_t count) { if (experimental.type() != reference.type()) { return Disposition::kIncorrect; } bool passed = false; if (count == 0) { count = reference.capacity(); } if (options.verification.epsilon == 0) { // bit-level equality passed = DeviceAllocation::block_compare_equal( experimental.type(), experimental.data(), reference.data(), count); } else { // relative error function passed = DeviceAllocation::block_compare_relatively_equal( experimental.type(), experimental.data(), reference.data(), count, options.verification.epsilon, options.verification.nonzero_floor); } return passed ? Disposition::kPassed : Disposition::kIncorrect; } /// Saves the workspace void OperationProfiler::save_workspace( DeviceContext &device_context, Options const &options, library::OperationDescription const &desc, library::Provider provider, library::Provider verification_provider) { for (auto const & named_allocation : device_context) { DeviceAllocation *allocation = named_allocation.second; std::stringstream filename; filename << desc.name << "_" << library::to_string(provider) << "_"; if (verification_provider != library::Provider::kInvalid) { filename << "verified_by_" << library::to_string(verification_provider) << "_"; } filename << named_allocation.first + ".mat"; std::ofstream out(filename.str()); allocation->write_tensor_csv(out); out << "\n"; if (options.report.verbose) { std::cout << "wrote '" << filename.str() << "'" << std::endl; } } } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Method to profile a CUTLASS Operation Status OperationProfiler::profile_cutlass_( double &runtime, Options const &options, library::Operation const *operation, void *arguments, void *host_workspace, void *device_workspace) { GpuTimer timer; // // Optional sleep to limit power consumption and thermals // sleep(options.profiling.sleep_duration); // // Warmup loop // Status status; for (int iteration = 0; iteration < options.profiling.warmup_iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Initialize GPU timer // timer.start(); // // Profiling loop // int Iterations = options.profiling.iterations; int iteration = 0; for (; iteration < Iterations; ++iteration) { status = operation->run( arguments, host_workspace, device_workspace); if (status != Status::kSuccess) { return status; } } // // Wait for completion // timer.stop_and_wait(); // // Update performance result // runtime = timer.duration(iteration); return status; } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Sets operation description void OperationProfiler::initialize_result_( PerformanceResult &result, library::OperationDescription const &operation_desc, ProblemSpace const &problem_space) { set_argument(result, "op_class", problem_space, 
library::to_string(operation_desc.tile_description.math_instruction.opcode_class)); set_argument(result, "accum", problem_space, library::to_string(operation_desc.tile_description.math_instruction.element_accumulator)); set_argument(result, "cta_m", problem_space, operation_desc.tile_description.threadblock_shape.m()); set_argument(result, "cta_n", problem_space, operation_desc.tile_description.threadblock_shape.n()); set_argument(result, "cta_k", problem_space, operation_desc.tile_description.threadblock_shape.k()); set_argument(result, "stages", problem_space, operation_desc.tile_description.threadblock_stages); set_argument(result, "warps_m", problem_space, operation_desc.tile_description.warp_count.m()); set_argument(result, "warps_n", problem_space, operation_desc.tile_description.warp_count.n()); set_argument(result, "warps_k", problem_space, operation_desc.tile_description.warp_count.k()); set_argument(result, "inst_m", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.m()); set_argument(result, "inst_n", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.n()); set_argument(result, "inst_k", problem_space, operation_desc.tile_description.math_instruction.instruction_shape.k()); set_argument(result, "min_cc", problem_space, operation_desc.tile_description.minimum_compute_capability); set_argument(result, "max_cc", problem_space, operation_desc.tile_description.maximum_compute_capability); } /// Helper void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, std::string const &value) { result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), value); } void OperationProfiler::set_argument( PerformanceResult &result, char const *name, ProblemSpace const &problem_space, int64_t value) { result.arguments.at(problem_space.argument_index(name)) = make_pair(std::string(name), library::lexical_cast(value)); } /// finds string matches filter_string in operation_name bool OperationProfiler::find_string_matches_( std::string const &filter_string, std::string const &operation_name) { // Returns true if all substrings appear in the operation_name in order // Split filter_string of the format "gemm*f32*nt" to tokens ["gemm", "f32", "nt"] std::string item; std::istringstream iss(filter_string); std::vector<std::string> filter_tokens; while (std::getline(iss, item, '*')) { filter_tokens.push_back(item); } // Search filter_tokens in operation_name in order size_t start = 0, idx = 0; for(auto & token : filter_tokens) { // Check if characters left to be parsed in operation_name if (start < operation_name.length()) { // Find token in operation_name[start:] idx = operation_name.substr(start).find(token); if (idx == std::string::npos) { return false; } } start += (idx + token.length()); } // All tokens in filter_string found in operation_name return true; } /////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace profiler } // namespace cutlass ///////////////////////////////////////////////////////////////////////////////////////////////////
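// --- Illustrative sketch (not part of the original file above) ---
// profile_cutlass_() follows a warmup-loop / timed-loop pattern and reports the
// average per-iteration runtime via GpuTimer. The standalone helper below shows the
// same pattern with raw CUDA events; it is a sketch only, assuming iterations > 0,
// and 'launch_work' is a hypothetical stand-in for operation->run().
#include <cuda_runtime.h>

template <typename F>
static float time_average_ms(F launch_work, int warmup_iterations, int iterations) {
  for (int i = 0; i < warmup_iterations; ++i) {
    launch_work();                        // warm up clocks/caches; result discarded
  }
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  for (int i = 0; i < iterations; ++i) {
    launch_work();                        // timed region: back-to-back launches
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);             // wait for completion before reading the timer
  float total_ms = 0.f;
  cudaEventElapsedTime(&total_ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return total_ms / float(iterations);    // average runtime per iteration, in ms
}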
b7e4ac95903b232794a424c35ac0fe4797696a01.hip
// !!! This is a file automatically generated by hipify!!! /*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ /*! @file spherical_harmonics_cuda.cu *! @brief CUDA implementation of the spherical harmonics evaluation. */ #include "streamcontainer.h" #include "integrator.h" #include "spherical_harmonics_cuda.h" #include "bubbles_cuda.h" #include "grid.h" #include "cube.h" #include "memory_leak_operators.h" #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #define X_ 0 #define Y_ 1 #define Z_ 2 #define R_ 3 #define TERM_COUNT 10 #define MAX_TERM_COUNT 300 /** \brief Size of the CUDA blocks in the X dimension */ #define BLOCKDIMX 8 /** \brief Size of the CUDA blocks in the Y dimension */ #define BLOCKDIMY 4 /** \brief Size of the CUDA blocks in the Z dimension */ #define BLOCKDIMZ 4 #define BLOCKSZ 64 hipError_t cudaErrorStat; __constant__ int shape_x_, shape_y_, shape_z_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_; inline __device__ void calc_distance(double *dist_vec_x, double *dist_vec_y, double *dist_vec_z, double *dist, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point *dist_vec_x=x-reference_point_x; *dist_vec_y=y-reference_point_y; *dist_vec_z=z-reference_point_z; // evaluate the length of the dist_vector, i.e., the distance between dist_vec and reference_point *dist=sqrt((*dist_vec_x) * (*dist_vec_x)+ (*dist_vec_y) * (*dist_vec_y)+ (*dist_vec_z) * (*dist_vec_z)); return; } __device__ void RealSphericalHarmonics_evaluate_point_simple(const double x, const double y, const double z, const double r, const int lmax, const int lm_address_difference, double *result) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; //double r2 = x*x+y*y+z*z; // set value for l=0, m=0 result[lm_address] = 1.0; // set value for l=1, m=-1 lm_address += lm_address_difference; result[lm_address] = y / r; // set all values where m=-1 m = -1; prev1 = y / r; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 
from l=2 address2 = 5 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1 / r; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=0 lm_address += lm_address_difference; result[lm_address] = z / r; // set all values where m=0 prev1 = z / r; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1 / r; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=1 lm_address += lm_address_difference; result[lm_address] = x / r; // set all values where m=1 prev1 = x / r; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1 / r; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // go through the rest of the stuff bottom = y / r; // bottom refers to real spherical harmonics value with l=l-1 and m=-(l-1) top = x / r; // top refers to real spherical harmonics value with l=l-1 and m=l-1 lm_address += lm_address_difference; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom) / r; result[lm_address] = new_bottom; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1 / r; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l lm_address += 2*l*lm_address_difference; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ) / r; // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1 / r; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result[lm_address] = top; // get next address lm_address += lm_address_difference; } } __device__ void RealRegularSolidHarmonics_evaluate_point_simple(const double x, const double y, const double z, const int lmax, const int lm_address_difference, double *result) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double r2 = x*x+y*y+z*z; // set value for l=0, m=0 result[lm_address] = 1.0; // set value for l=1, m=-1 lm_address += lm_address_difference; result[lm_address] = y; // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = 5 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=0 lm_address += lm_address_difference; result[lm_address] = z; // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=1 lm_address += lm_address_difference; result[lm_address] = x; // set all values where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += lm_address_difference; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); result[lm_address] = new_bottom; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * 
lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l lm_address += 2*l*lm_address_difference; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result[lm_address] = top; // get next address lm_address += lm_address_difference; } } __device__ inline void getXYZ3D(int *x, int *y, int *z) { *x = blockIdx.x * blockDim.x + threadIdx.x; *y = blockIdx.y * blockDim.y + threadIdx.y; *z = blockIdx.z * blockDim.z + threadIdx.z; } /*__global__ void RealRegularSolidHarmonics_evaluate_3d( int shape_x, int shape_y, shape_z, int slice_offset, int slice_count, int pitch, int memory_shape_y, int memory_shape_z, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax) { // get the id of the point (We are using only the first ) int x, y, z; getXYZ3D(&x, &y, &z); z += slice_offset; // Check that the point is within the block & is within the handled slices if (x < shape_x && y < shape_y && z < shape_z && z < slice_count) { double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->grid_points_x[x], grid->grid_points_y[y], grid->grid_points_z[z]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) pitch * memory_shape_y * memory_shape_z, &cubes[id]); } return; }*/ __global__ void RealRegularSolidCubeHarmonics_evaluate_grid( const int shape_x, const int shape_y, const int shape_z, const double *gridpoints_x, const double *gridpoints_y, const double *gridpoints_z, const int lmax, double *cubes, const size_t pitch, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, // the number of slices handled by this kernel call const int slice_count, // the number of slices that resides in the memory of this device const int device_slice_count, // order number of device used in this evaluation const int device_order_number ) { //const int shape_x = grid->shape[X_], shape_y = grid->shape[Y_], shape_z = grid->shape[Z_]; /*const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 
1) + 1;*/ // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. int x, y, z; getXYZ3D(&x, &y, &z); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // get the id of the point in the result array int id = + z * shape_y * pitch / sizeof(double) + y * pitch / sizeof(double) + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, gridpoints_x[x], gridpoints_y[y], gridpoints_z[z+slice_offset]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) pitch / sizeof(double) * shape_y * device_slice_count, &cubes[id]); } return; } __global__ void RealRegularSolidHarmonics_evaluate_grid_kernel_fast( Grid3D *grid, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax){ // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 1) + 1; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. const short z = id / (shape_x * shape_y); const short y = (id - z * shape_x * shape_y) / (shape_x); const short x = (id - z * shape_x * shape_y - y * shape_x); // Check that the point is within the block if (x < shape_x && y < shape_y && z < shape_z) { double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->axis[X_]->gridpoints[x], grid->axis[Y_]->gridpoints[y], grid->axis[Z_]->gridpoints[z]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) shape_x * shape_y * shape_z, &cubes[id]); } return; } __global__ void RealSphericalCubeHarmonics_evaluate_grid( const int shape_x, const int shape_y, const int shape_z, const double *gridpoints_x, const double *gridpoints_y, const double *gridpoints_z, const int lmin, const int lmax, const int normalization, double *cubes, size_t pitch, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, // the number of slices handled by this kernel call const int slice_count, // the number of slices that resides in the memory of this device const int device_slice_count) { // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
int x, y, z, i, l, m; double normalization_factor; getXYZ3D(&x, &y, &z); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // get the id of the point in the result array int id = + z * shape_y * pitch / sizeof(double) + y * pitch / sizeof(double) + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, gridpoints_x[x], gridpoints_y[y], gridpoints_z[z+slice_offset]); // calculate the real harmonics values for the point if (distance > 1e-12) { RealSphericalHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, distance, lmax, (int) pitch / sizeof(double) * shape_y * device_slice_count, &cubes[id]); } else { i = 0; for (l = lmin; l <= lmax; l++) { for (m = -l; m <= l; m++) { cubes[id+i] = 0.0; i += pitch / sizeof(double) * shape_y * device_slice_count; } } if (lmin == 0) cubes[id] = 1.0; } // Multiply with normalization factor sqrt((2*l+1) / (4 * pi)), if // we are using conventional normalization if (normalization == 2) { i = 0; normalization_factor = 1.0; for (l = lmin; l <= lmax; l++) { normalization_factor = sqrt((2.0*(double)l+1.0)/(4.0*M_PI)); for (m = -l; m <= l; m++) { cubes[id+i] *= normalization_factor; i += pitch / sizeof(double) * shape_y * device_slice_count; } } } } return; } __global__ void RealSphericalHarmonics_evaluate_grid_kernel_fast(Grid3D *grid, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax, const int normalization ) { int i = 0; // load the number_of_lm_terms to shared memory /* extern __shared__ int shared_memory[]; int *number_of_lm_terms = shared_memory; i = threadIdx.x; while (i < ijk_max_) { number_of_lm_terms[i] = harmonics->number_of_lm_terms[i]; i += blockDim.x; } __syncthreads(); i = threadIdx.x; int *lm_indices = &shared_memory[ijk_max_]; while (i < number_of_lm_terms[ijk_max_-1]) { lm_indices[i] = harmonics->lm_indices[i]; i += blockDim.x; } __syncthreads(); // load the coefficients to shared memory i = threadIdx.x; double *coefficients = (double * )&shared_memory[ijk_max_+number_of_lm_terms[ijk_max_-1]]; while (i < number_of_lm_terms[ijk_max_-1]) { coefficients[i] = harmonics->new_coefficients[i]; i += blockDim.x; } __syncthreads();*/ // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 1) + 1; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
const int z = id / (shape_x * shape_y); const int y = (id - z * shape_x * shape_y) / (shape_x); const int x = (id - z * shape_x * shape_y - y * shape_x); int l = 0; int m = 0; double normalization_factor = 0.0; // Check that the point is within the block if (x < shape_x && y < shape_y && z < shape_z) { // get pointer to the result array value we are evaluating // first get the number of lm-pairs //const int address = z * (shape_x * shape_y) // + y * (shape_x) // + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->axis[X_]->gridpoints[x], grid->axis[Y_]->gridpoints[y], grid->axis[Z_]->gridpoints[z]); // calculate the solid harmonic values for the point //RealRegularSolidHarmonics_evaluate_point_new(harmonics, relative_position_x, relative_position_y, // relative_position_z, (int) shape_x * shape_y * shape_z, // number_of_lm_terms, coefficients, lm_indices, &cubes[address]); if (distance > 1e-4) { RealSphericalHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, distance, lmax, (int) shape_x * shape_y * shape_z, &cubes[id]); } else { cubes[id] = 1.0; } // Multiply with normalization factor sqrt((2*l+1) / (4 * pi)), if // we are using conventional normalization if (normalization == 2) { i = 0; normalization_factor = 1.0; for (l = lmin; l <= lmax; l++) { normalization_factor = sqrt((2.0*(double)l+1.0)/(4.0*M_PI)); for (m = -l; m <= l; m++) { cubes[id+i] *= normalization_factor; i += shape_x*shape_y*shape_z; } } } } return; } __host__ inline void check_cuda_errors(const char *filename, const int line_number) { #ifdef DEBUG_CUDA hipDeviceSynchronize(); #endif hipError_t error = hipGetLastError(); if(error != hipSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, hipGetErrorString(error)); exit(-1); } } /* * Set cube values withing width, height and depth to zero */ __global__ void set_cube_to_zero(double *cube, long pitch, long row_count, const int width, const int height, const int depth, const int size) { // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = width; const int shape_y = height; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
const int z = id / (shape_x * shape_y); const int y = (id - z * shape_x * shape_y) / (shape_x); const int x = (id - z * shape_x * shape_y - y * shape_x); long cube_pointer; double *cube_value; if (x < width && y < height && z < depth) { cube_pointer = (long)cube + x * size + y * pitch + z * row_count * pitch; cube_value = (double *)cube_pointer; *cube_value = 0.0; } } extern "C" void cube_set_to_zero_(int *, int *, int *, int *, long *, long *, long *, long *, int *, int *); extern "C" void cube_set_to_zero_(int *deviceID, int *width, int *height, int *depth, long *devPtr, long *pitch, long *x_size, long *y_size, int *size, int *number_of_devices) { // Allocate cube // create extent for the cube int device = (*deviceID-1)%(*number_of_devices); hipSetDevice(device); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch int gridSize; // The actual grid size needed, based on input // size hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, (void*)set_cube_to_zero, 0, (*width) * (*height) * (*depth)); // Round up according to array size gridSize = ((*width) * (*height) * (*depth) + blockSize - 1) / blockSize; //printf("device: %d, gridsize: %d, blocksize:%d, dev cube: %ld, pitch: %ld, y_size: %ld, width: %d, height: %d, depth: %d", // device, gridSize, blockSize, *devPtr, *pitch, *y_size, *width, *height, *depth //); hipLaunchKernelGGL(( set_cube_to_zero), dim3(gridSize), dim3(blockSize), 0, 0, (double *)*devPtr, *pitch, *y_size, *width, *height, *depth, *size); // hipExtent extent = make_hipExtent(*width * sizeof(RP), *height, *depth); // hipPitchedPtr devPitchedPtr = make_hipPitchedPtr((void *)*devPtr, *pitch, *x_size, *y_size); // printf("setting zero ptr %ld, xsize: %ld, ysize: %ld, pitch: %ld", *devPtr, devPitchedPtr.xsize, devPitchedPtr.ysize, devPitchedPtr.pitch); // set_cube_to_zero<<<>>> check_cuda_errors(__FILE__, __LINE__); return; } /************************************************* * Host RealHarmonics functionality * * ***********************************************/ void RealHarmonics::initRealHarmonics(int lmin, int lmax, int normalization, StreamContainer *streamContainer) { this->lmin = lmin; this->lmax = lmax; this->normalization = normalization; this->streamContainer = streamContainer; } /************************************************* * Host RealCubeHarmonics functionality * * ***********************************************/ int *RealCubeHarmonics::getShape() { return this->shape; } double **RealCubeHarmonics::getDeviceResults() { return this->device_results; } double *RealCubeHarmonics::getDeviceResults(int device) { return this->device_results[device]; } size_t *RealCubeHarmonics::getDevicePitches() { return this->device_pitches; } size_t RealCubeHarmonics::getDevicePitch(int device) { return this->device_pitches[device]; } void RealCubeHarmonics::initCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initRealHarmonics(lmin, lmax, normalization, streamContainer); // allocate space for device cube pointers this->device_results = new double*[this->streamContainer->getNumberOfDevices()]; this->device_pitches = new size_t[this->streamContainer->getNumberOfDevices()]; this->device_copies = new RealHarmonics*[this->streamContainer->getNumberOfDevices()]; // copy the shape this->shape[X_] = shape[X_]; this->shape[Y_] = shape[Y_]; this->shape[Z_] = shape[Z_]; // the limits of the lmax array 
int ilmmax = (this->lmax+1)*(this->lmax+1); //int ilmmin = (this->lmin)*(this->lmin); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); hipPitchedPtr pointer; // get the portion that is handled by device with order number 'device' int device_slice_count = shape[Z_] / this->streamContainer->getNumberOfDevices() + ((shape[Z_] % this->streamContainer->getNumberOfDevices()) > device); // allocate memory for entire shape for the main pointers hipExtent extent = make_hipExtent(shape[X_] * sizeof(double), shape[Y_], device_slice_count * ilmmax ); hipMalloc3D (&pointer, extent); check_cuda_errors(__FILE__, __LINE__); this->device_pitches[device] = pointer.pitch; this->device_results[device] = (double *) pointer.ptr; // allocate the device memory and copy hipMalloc(&this->device_copies[device], sizeof(*this)); hipMemcpy(this->device_copies[device], this, sizeof(*this), hipMemcpyHostToDevice); check_cuda_errors(__FILE__, __LINE__); } } void RealCubeHarmonics::evaluate(Grid3D *grid, double center[3]) { check_cuda_errors(__FILE__, __LINE__); int slice_offset = 0; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); size_t device_pitch = this->getDevicePitch(device); double *device_results = this->getDeviceResults(device); // get the portion that is handled by device with order number 'device' int device_slice_count = shape[Z_] / this->streamContainer->getNumberOfDevices() + ((shape[Z_] % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // get the portion that is handled by stream with order number 'stream' int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); // do the per-stream evaluation (NOTE: the functions hide the kernel calls to the spherical harmonics / solid harmonics) this->evaluateSingleStream(device_results, device_pitch, device, grid, center, slice_count, device_slice_count, slice_offset, this->streamContainer->getStream(device, stream)); check_cuda_errors(__FILE__, __LINE__); // add to the slice_offset slice_offset += slice_count; // add to the cube pointer device_results += device_pitch / sizeof(double) * this->shape[Y_] * slice_count; } } } /* * Note: works best if the host_results is registered/inited as pinned before using this method * * @param host_results pointer to a four dimensional array of shape (shape[X_], shape[Y_], shape[Z_], lmax) * @param host_results_shape (x, y, z, l) */ void RealCubeHarmonics::download(double *host_results, int host_results_shape[4]) { check_cuda_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); // and get the corresponding cube pointer and pitch size_t device_pitch = this->getDevicePitch(device); double *device_results = this->getDeviceResults(device); // get the portion that is handled by device with order number 'device' int device_slice_count = this->shape[Z_] / this->streamContainer->getNumberOfDevices() + ((this->shape[Z_] * (this->lmax+1) % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // get the portion that is 
handled by stream with order number 'stream' int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); int lm_offset = 0; int lm_device_offset = 0; if (slice_count > 0) { for (int n = this->lmin*this->lmin; n < (this->lmax +1) * (this->lmax +1); n++) { hipMemcpy3DParms memCopyParameters = {0}; memCopyParameters.dstPtr = make_hipPitchedPtr(&host_results[lm_offset], host_results_shape[X_]*sizeof(double), host_results_shape[X_], host_results_shape[Y_]); memCopyParameters.srcPtr = make_hipPitchedPtr(&device_results[lm_device_offset], device_pitch, shape[X_], shape[Y_]); memCopyParameters.extent = make_hipExtent(this->shape[X_] * sizeof(double), this->shape[Y_], slice_count); memCopyParameters.kind = hipMemcpyDeviceToHost; // copy the f1 cube to device: 3D hipMemcpy3DAsync(&memCopyParameters, *this->streamContainer->getStream(device, stream)); check_cuda_errors(__FILE__, __LINE__); // add to the offsets caused by the l lm_offset += this->shape[X_] * this->shape[Y_] * this->shape[Z_]; lm_device_offset += device_pitch / sizeof(double) * this->shape[Y_] * device_slice_count; } // add to the result pointers host_results += slice_count * host_results_shape[X_] * host_results_shape[Y_]; device_results += device_pitch / sizeof(double) * this->shape[Y_] * slice_count; } } } } void RealCubeHarmonics::registerHostResultArray(double *host_results, int host_results_shape[4]) { // register host memory for download hipHostRegister(host_results, host_results_shape[0]*host_results_shape[1]*host_results_shape[2]*host_results_shape[3] * sizeof(double), hipHostRegisterPortable); check_cuda_errors(__FILE__, __LINE__); } void RealCubeHarmonics::unregisterHostResultArray(double *host_results) { // unregister host memory hipHostUnregister(host_results); check_cuda_errors(__FILE__, __LINE__); } void RealCubeHarmonics::destroy() { for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { hipFree(this->device_results[device]); hipFree(this->device_copies[device]); } delete[] this->device_results; delete[] this->device_pitches; delete[] this->device_copies; } /**************************************************** * Host RealRegularSolidCubeHarmonics functionality * ****************************************************/ RealRegularSolidCubeHarmonics::RealRegularSolidCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } void RealRegularSolidCubeHarmonics::evaluateSingleStream(double *device_results, size_t device_pitch, int device, Grid3D *grid3d, double center[3], int slice_count, int device_slice_count, int slice_offset, hipStream_t *stream) { if (slice_count > 0) { // get the launch configuration dim3 grid, block; getCubeLaunchConfiguration(&grid, &block, this->shape, slice_count, 256); // call the kernel hipLaunchKernelGGL(( RealRegularSolidCubeHarmonics_evaluate_grid) , dim3(grid), dim3(block), 0, *stream , grid3d->getShape(X_), grid3d->getShape(Y_), grid3d->getShape(Z_), grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], lmax, device_results, device_pitch, center[X_], center[Y_], center[Z_], slice_offset, slice_count, device_slice_count, device); check_cuda_errors(__FILE__, __LINE__); } } /**************************************************** * Host RealSphericalCubeHarmonics 
functionality * ****************************************************/ RealSphericalCubeHarmonics::RealSphericalCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } void RealSphericalCubeHarmonics::evaluateSingleStream(double *device_results, size_t device_pitch, int device, Grid3D *grid3d, double center[3], int slice_count, int device_slice_count, int slice_offset, hipStream_t *stream) { if (slice_count > 0) { // get the launch configuration dim3 grid, block; getCubeLaunchConfiguration(&grid, &block, this->shape, slice_count, 256); // call the kernel hipLaunchKernelGGL(( RealSphericalCubeHarmonics_evaluate_grid) , dim3(grid), dim3(block), 0, *stream , grid3d->getShape(X_), grid3d->getShape(Y_), grid3d->getShape(Z_), grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], this->lmin, this->lmax, this->normalization, device_results, device_pitch, center[X_], center[Y_], center[Z_], slice_offset, slice_count, device_slice_count ); check_cuda_errors(__FILE__, __LINE__); } } /******************************************************* * Fortran interfaces - RealRegularSolidCubeHarmonics * *******************************************************/ extern "C" RealRegularSolidCubeHarmonics *realregularsolidcubeharmonics_init_cuda(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { return new RealRegularSolidCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } extern "C" void realregularsolidcubeharmonics_destroy_cuda(RealRegularSolidCubeHarmonics *harmonics) { harmonics->destroy(); } extern "C" void realregularsolidcubeharmonics_evaluate_cuda(RealRegularSolidCubeHarmonics *harmonics, Grid3D *grid, double center[3]) { harmonics->evaluate(grid, center); } /**************************************************** * Fortran interfaces - RealSphericalCubeHarmonics * ****************************************************/ extern "C" RealSphericalCubeHarmonics *realsphericalcubeharmonics_init_cuda(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { return new RealSphericalCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } extern "C" void realsphericalcubeharmonics_download_cuda(RealSphericalCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->download(host_results, host_results_shape); } extern "C" void realsphericalcubeharmonics_destroy_cuda(RealSphericalCubeHarmonics *harmonics) { harmonics->destroy(); } extern "C" void realsphericalcubeharmonics_evaluate_cuda(RealSphericalCubeHarmonics *harmonics, Grid3D *grid, double center[3]) { harmonics->evaluate(grid, center); } /**************************************************** * Fortran interfaces - RealCubeHarmonics * ****************************************************/ extern "C" void realcubeharmonics_register_result_array_cuda(RealCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->registerHostResultArray(host_results, host_results_shape); } extern "C" void realcubeharmonics_unregister_result_array_cuda(RealCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->unregisterHostResultArray(host_results); }
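The host code above advances the raw per-device result pointer in units of pitch / sizeof(double) * shape[Y_] per z-slice and per (l, m) component. As a reading aid only, the small helper below spells out that addressing for a single element; it is a sketch, not part of the original sources, and the name cube_offset and its signature are illustrative.

#include <cstddef>

// Sketch only: linear offset (in doubles) of element (x, y, z, lm) inside one
// device's pitched result cube, mirroring the strides used by evaluate() and
// download() above. `pitch` is the row pitch in bytes from hipMalloc3D and
// `device_slice_count` is the number of z-slices resident on this device.
inline size_t cube_offset(size_t pitch, int shape_y, int device_slice_count,
                          int x, int y, int z, int lm) {
    size_t row_elems   = pitch / sizeof(double);                   // padded row length
    size_t slice_elems = row_elems * (size_t)shape_y;              // one xy-slice
    size_t lm_stride   = slice_elems * (size_t)device_slice_count; // one (l,m) block
    return (size_t)lm * lm_stride + (size_t)z * slice_elems
         + (size_t)y * row_elems + (size_t)x;
}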
b7e4ac95903b232794a424c35ac0fe4797696a01.cu
/*----------------------------------------------------------------------------------* * Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, * * Sergio Losilla, Elias Toivanen, Jonas Juselius * * * * Permission is hereby granted, free of charge, to any person obtaining a copy * * of this software and associated documentation files (the "Software"), to deal * * in the Software without restriction, including without limitation the rights * * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * * copies of the Software, and to permit persons to whom the Software is * * furnished to do so, subject to the following conditions: * * * * The above copyright notice and this permission notice shall be included in all* * copies or substantial portions of the Software. * * * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * * SOFTWARE. * *----------------------------------------------------------------------------------*/ /*! @file spherical_harmonics_cuda.cu *! @brief CUDA implementation of the spherical harmonics evaluation. */ #include "streamcontainer.h" #include "integrator.h" #include "spherical_harmonics_cuda.h" #include "bubbles_cuda.h" #include "grid.h" #include "cube.h" #include "memory_leak_operators.h" #include <cuda.h> #include <stdlib.h> #include <stdio.h> #define X_ 0 #define Y_ 1 #define Z_ 2 #define R_ 3 #define TERM_COUNT 10 #define MAX_TERM_COUNT 300 /** \brief Size of the CUDA blocks in the X dimension */ #define BLOCKDIMX 8 /** \brief Size of the CUDA blocks in the Y dimension */ #define BLOCKDIMY 4 /** \brief Size of the CUDA blocks in the Z dimension */ #define BLOCKDIMZ 4 #define BLOCKSZ 64 cudaError_t cudaErrorStat; __constant__ int shape_x_, shape_y_, shape_z_, lmax_, ilmmin_, lmin_, ilmmax_, first_term_, normalization_, ijk_max_; inline __device__ void calc_distance(double *dist_vec_x, double *dist_vec_y, double *dist_vec_z, double *dist, const double reference_point_x, const double reference_point_y, const double reference_point_z, const double x, const double y, const double z){ // calculate the vector relative to reference_point *dist_vec_x=x-reference_point_x; *dist_vec_y=y-reference_point_y; *dist_vec_z=z-reference_point_z; // evaluate the length of the dist_vector, i.e., the distance between dist_vec and reference_point *dist=sqrt((*dist_vec_x) * (*dist_vec_x)+ (*dist_vec_y) * (*dist_vec_y)+ (*dist_vec_z) * (*dist_vec_z)); return; } __device__ void RealSphericalHarmonics_evaluate_point_simple(const double x, const double y, const double z, const double r, const int lmax, const int lm_address_difference, double *result) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; //double r2 = x*x+y*y+z*z; // set value for l=0, m=0 result[lm_address] = 1.0; // set value for l=1, m=-1 lm_address += lm_address_difference; result[lm_address] = y / r; // set all values where m=-1 m = -1; prev1 = y / r; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = 5 * lm_address_difference; for (l = 2; l <= lmax; 
l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1 / r; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=0 lm_address += lm_address_difference; result[lm_address] = z / r; // set all values where m=0 prev1 = z / r; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1 / r; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=1 lm_address += lm_address_difference; result[lm_address] = x / r; // set all values where m=1 prev1 = x / r; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1 / r; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // go through the rest of the stuff bottom = y / r; // bottom refers to real spherical harmonics value with l=l-1 and m=-(l-1) top = x / r; // top refers to real spherical harmonics value with l=l-1 and m=l-1 lm_address += lm_address_difference; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom) / r; result[lm_address] = new_bottom; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1 / r; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // get value for l=l, m=l. 
The address is 2*l items away from l=l, m=-l lm_address += 2*l*lm_address_difference; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ) / r; // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1 / r; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result[lm_address] = top; // get next address lm_address += lm_address_difference; } } __device__ void RealRegularSolidHarmonics_evaluate_point_simple(const double x, const double y, const double z, const int lmax, const int lm_address_difference, double *result) { int lm_address =0, address2 = 0; int l, m, l2; double top = 0.0, bottom = 0.0, new_bottom = 0.0, prev1 = 0.0, prev2 = 0.0, current = 0.0; double r2 = x*x+y*y+z*z; // set value for l=0, m=0 result[lm_address] = 1.0; // set value for l=1, m=-1 lm_address += lm_address_difference; result[lm_address] = y; // set all values where m=-1 m = -1; prev1 = y; // the starting address has 1 item before from the l=0, 3 from l=1, and 1 from l=2 address2 = 5 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=0 lm_address += lm_address_difference; result[lm_address] = z; // set all values where m=0 prev1 = z; prev2 = 1.0; m = 0; // the starting address has 1 item before from the l=0, 3 from l=1, and 2 from l=2 address2 = 6 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z * prev1; current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // set value for l=1, m=1 lm_address += lm_address_difference; result[lm_address] = x; // set all values where m=1 prev1 = x; m = 1; // the starting address has 1 item before from the l=0, 3 from l=1, and 3 from l=2 address2 = 7 * lm_address_difference; for (l = 2; l <= lmax; l++) { current = ( 2.0*(double)l-1.0) / sqrt( 1.0*(double)((l+m)*(l-m)) ) * z*prev1; if (l > 2) { current -= sqrt( (double)((l+m-1)*(l-m-1)) / (double)((l+m)*(l-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=0 address2 += lm_address_difference * (2*l+2); } // go through the rest of the stuff bottom = y; // bottom refers to solid harmonics value with l=l-1 and m=-(l-1) top = x; // top refers to solid harmonics value with l=l-1 and m=l-1 lm_address += lm_address_difference; for (l=2; l <= lmax; l++) { new_bottom = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( y*top + x*bottom); result[lm_address] = new_bottom; // set all values where m=-l m = -l; prev1 = new_bottom; address2 = lm_address + (2*l+2) * 
lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // get value for l=l, m=l. The address is 2*l items away from l=l, m=-l lm_address += 2*l*lm_address_difference; top = sqrt((2.0*(double)l - 1.0) / (2.0*(double)l)) * ( x*top-y*bottom ); // set all values where m=l m = l; prev1 = top; address2 = lm_address + (2*l+2) * lm_address_difference; for (l2 = l+1; l2 <= lmax; l2++) { current = ( 2.0*(double)l2-1.0) / sqrt( 1.0*(double)((l2+m)*(l2-m)) ) * z*prev1; if (l2 > l+1) { current -= sqrt( (double)((l2+m-1)*(l2-m-1)) / (double)((l2+m)*(l2-m)) ) * r2 * prev2; } prev2 = prev1; prev1 = current; result[address2] = current; // add the address2 to get to the next item with m=l address2 += lm_address_difference * (2*l2+2); } // store the new bottom: l=l, m=-l (we need the old bottom in calculation of top) bottom = new_bottom; result[lm_address] = top; // get next address lm_address += lm_address_difference; } } __device__ inline void getXYZ3D(int *x, int *y, int *z) { *x = blockIdx.x * blockDim.x + threadIdx.x; *y = blockIdx.y * blockDim.y + threadIdx.y; *z = blockIdx.z * blockDim.z + threadIdx.z; } /*__global__ void RealRegularSolidHarmonics_evaluate_3d( int shape_x, int shape_y, shape_z, int slice_offset, int slice_count, int pitch, int memory_shape_y, int memory_shape_z, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax) { // get the id of the point (We are using only the first ) int x, y, z; getXYZ3D(&x, &y, &z); z += slice_offset; // Check that the point is within the block & is within the handled slices if (x < shape_x && y < shape_y && z < shape_z && z < slice_count) { double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->grid_points_x[x], grid->grid_points_y[y], grid->grid_points_z[z]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) pitch * memory_shape_y * memory_shape_z, &cubes[id]); } return; }*/ __global__ void RealRegularSolidCubeHarmonics_evaluate_grid( const int shape_x, const int shape_y, const int shape_z, const double *gridpoints_x, const double *gridpoints_y, const double *gridpoints_z, const int lmax, double *cubes, const size_t pitch, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, // the number of slices handled by this kernel call const int slice_count, // the number of slices that resides in the memory of this device const int device_slice_count, // order number of device used in this evaluation const int device_order_number ) { //const int shape_x = grid->shape[X_], shape_y = grid->shape[Y_], shape_z = grid->shape[Z_]; /*const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 
1) + 1;*/ // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. int x, y, z; getXYZ3D(&x, &y, &z); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // get the id of the point in the result array int id = + z * shape_y * pitch / sizeof(double) + y * pitch / sizeof(double) + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, gridpoints_x[x], gridpoints_y[y], gridpoints_z[z+slice_offset]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) pitch / sizeof(double) * shape_y * device_slice_count, &cubes[id]); } return; } __global__ void RealRegularSolidHarmonics_evaluate_grid_kernel_fast( Grid3D *grid, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax){ // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 1) + 1; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. const short z = id / (shape_x * shape_y); const short y = (id - z * shape_x * shape_y) / (shape_x); const short x = (id - z * shape_x * shape_y - y * shape_x); // Check that the point is within the block if (x < shape_x && y < shape_y && z < shape_z) { double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->axis[X_]->gridpoints[x], grid->axis[Y_]->gridpoints[y], grid->axis[Z_]->gridpoints[z]); // calculate the solid harmonic value for the point RealRegularSolidHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, lmax, (int) shape_x * shape_y * shape_z, &cubes[id]); } return; } __global__ void RealSphericalCubeHarmonics_evaluate_grid( const int shape_x, const int shape_y, const int shape_z, const double *gridpoints_x, const double *gridpoints_y, const double *gridpoints_z, const int lmin, const int lmax, const int normalization, double *cubes, size_t pitch, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int slice_offset, // the number of slices handled by this kernel call const int slice_count, // the number of slices that resides in the memory of this device const int device_slice_count) { // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
int x, y, z, i, l, m; double normalization_factor; getXYZ3D(&x, &y, &z); // Check that the point is within the block if (x < shape_x && y < shape_y && z+slice_offset < shape_z && z < slice_count) { // get the id of the point in the result array int id = + z * shape_y * pitch / sizeof(double) + y * pitch / sizeof(double) + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, gridpoints_x[x], gridpoints_y[y], gridpoints_z[z+slice_offset]); // calculate the real harmonics values for the point if (distance > 1e-12) { RealSphericalHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, distance, lmax, (int) pitch / sizeof(double) * shape_y * device_slice_count, &cubes[id]); } else { i = 0; for (l = lmin; l <= lmax; l++) { for (m = -l; m <= l; m++) { cubes[id+i] = 0.0; i += pitch / sizeof(double) * shape_y * device_slice_count; } } if (lmin == 0) cubes[id] = 1.0; } // Multiply with normalization factor sqrt((2*l+1) / (4 * pi)), if // we are using conventional normalization if (normalization == 2) { i = 0; normalization_factor = 1.0; for (l = lmin; l <= lmax; l++) { normalization_factor = sqrt((2.0*(double)l+1.0)/(4.0*M_PI)); for (m = -l; m <= l; m++) { cubes[id+i] *= normalization_factor; i += pitch / sizeof(double) * shape_y * device_slice_count; } } } } return; } __global__ void RealSphericalHarmonics_evaluate_grid_kernel_fast(Grid3D *grid, double *cubes, const double zero_point_x, const double zero_point_y, const double zero_point_z, const int lmin, const int lmax, const int normalization ) { int i = 0; // load the number_of_lm_terms to shared memory /* extern __shared__ int shared_memory[]; int *number_of_lm_terms = shared_memory; i = threadIdx.x; while (i < ijk_max_) { number_of_lm_terms[i] = harmonics->number_of_lm_terms[i]; i += blockDim.x; } __syncthreads(); i = threadIdx.x; int *lm_indices = &shared_memory[ijk_max_]; while (i < number_of_lm_terms[ijk_max_-1]) { lm_indices[i] = harmonics->lm_indices[i]; i += blockDim.x; } __syncthreads(); // load the coefficients to shared memory i = threadIdx.x; double *coefficients = (double * )&shared_memory[ijk_max_+number_of_lm_terms[ijk_max_-1]]; while (i < number_of_lm_terms[ijk_max_-1]) { coefficients[i] = harmonics->new_coefficients[i]; i += blockDim.x; } __syncthreads();*/ // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = grid->axis[X_]->ncell * (grid->axis[X_]->nlip - 1) + 1; const int shape_y = grid->axis[Y_]->ncell * (grid->axis[Y_]->nlip - 1) + 1; const int shape_z = grid->axis[Z_]->ncell * (grid->axis[Z_]->nlip - 1) + 1; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
const int z = id / (shape_x * shape_y); const int y = (id - z * shape_x * shape_y) / (shape_x); const int x = (id - z * shape_x * shape_y - y * shape_x); int l = 0; int m = 0; double normalization_factor = 0.0; // Check that the point is within the block if (x < shape_x && y < shape_y && z < shape_z) { // get pointer to the result array value we are evaluating // first get the number of lm-pairs //const int address = z * (shape_x * shape_y) // + y * (shape_x) // + x; double relative_position_x, relative_position_y, relative_position_z, distance = 0.0; // calculate relative position to the zero-point and distance to it calc_distance(&relative_position_x, &relative_position_y, &relative_position_z, &distance, zero_point_x, zero_point_y, zero_point_z, grid->axis[X_]->gridpoints[x], grid->axis[Y_]->gridpoints[y], grid->axis[Z_]->gridpoints[z]); // calculate the solid harmonic values for the point //RealRegularSolidHarmonics_evaluate_point_new(harmonics, relative_position_x, relative_position_y, // relative_position_z, (int) shape_x * shape_y * shape_z, // number_of_lm_terms, coefficients, lm_indices, &cubes[address]); if (distance > 1e-4) { RealSphericalHarmonics_evaluate_point_simple(relative_position_x, relative_position_y, relative_position_z, distance, lmax, (int) shape_x * shape_y * shape_z, &cubes[id]); } else { cubes[id] = 1.0; } // Multiply with normalization factor sqrt((2*l+1) / (4 * pi)), if // we are using conventional normalization if (normalization == 2) { i = 0; normalization_factor = 1.0; for (l = lmin; l <= lmax; l++) { normalization_factor = sqrt((2.0*(double)l+1.0)/(4.0*M_PI)); for (m = -l; m <= l; m++) { cubes[id+i] *= normalization_factor; i += shape_x*shape_y*shape_z; } } } } return; } __host__ inline void check_cuda_errors(const char *filename, const int line_number) { #ifdef DEBUG_CUDA cudaDeviceSynchronize(); #endif cudaError_t error = cudaGetLastError(); if(error != cudaSuccess) { printf("CUDA error at %s:%i: %s\n", filename, line_number, cudaGetErrorString(error)); exit(-1); } } /* * Set cube values withing width, height and depth to zero */ __global__ void set_cube_to_zero(double *cube, long pitch, long row_count, const int width, const int height, const int depth, const int size) { // get the id of the point (We are using only the first ) const int id=threadIdx.x + blockIdx.x * blockDim.x; const int shape_x = width; const int shape_y = height; // The result array will be in fortran with indices l, x, y, z. // This means that the x index will be the fastest to change. 
const int z = id / (shape_x * shape_y); const int y = (id - z * shape_x * shape_y) / (shape_x); const int x = (id - z * shape_x * shape_y - y * shape_x); long cube_pointer; double *cube_value; if (x < width && y < height && z < depth) { cube_pointer = (long)cube + x * size + y * pitch + z * row_count * pitch; cube_value = (double *)cube_pointer; *cube_value = 0.0; } } extern "C" void cube_set_to_zero_(int *, int *, int *, int *, long *, long *, long *, long *, int *, int *); extern "C" void cube_set_to_zero_(int *deviceID, int *width, int *height, int *depth, long *devPtr, long *pitch, long *x_size, long *y_size, int *size, int *number_of_devices) { // Allocate cube // create extent for the cube int device = (*deviceID-1)%(*number_of_devices); cudaSetDevice(device); int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the // maximum occupancy for a full device // launch int gridSize; // The actual grid size needed, based on input // size cudaOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, (void*)set_cube_to_zero, 0, (*width) * (*height) * (*depth)); // Round up according to array size gridSize = ((*width) * (*height) * (*depth) + blockSize - 1) / blockSize; //printf("device: %d, gridsize: %d, blocksize:%d, dev cube: %ld, pitch: %ld, y_size: %ld, width: %d, height: %d, depth: %d", // device, gridSize, blockSize, *devPtr, *pitch, *y_size, *width, *height, *depth //); set_cube_to_zero<<<gridSize, blockSize>>> ((double *)*devPtr, *pitch, *y_size, *width, *height, *depth, *size); // cudaExtent extent = make_cudaExtent(*width * sizeof(RP), *height, *depth); // cudaPitchedPtr devPitchedPtr = make_cudaPitchedPtr((void *)*devPtr, *pitch, *x_size, *y_size); // printf("setting zero ptr %ld, xsize: %ld, ysize: %ld, pitch: %ld", *devPtr, devPitchedPtr.xsize, devPitchedPtr.ysize, devPitchedPtr.pitch); // set_cube_to_zero<<<>>> check_cuda_errors(__FILE__, __LINE__); return; } /************************************************* * Host RealHarmonics functionality * * ***********************************************/ void RealHarmonics::initRealHarmonics(int lmin, int lmax, int normalization, StreamContainer *streamContainer) { this->lmin = lmin; this->lmax = lmax; this->normalization = normalization; this->streamContainer = streamContainer; } /************************************************* * Host RealCubeHarmonics functionality * * ***********************************************/ int *RealCubeHarmonics::getShape() { return this->shape; } double **RealCubeHarmonics::getDeviceResults() { return this->device_results; } double *RealCubeHarmonics::getDeviceResults(int device) { return this->device_results[device]; } size_t *RealCubeHarmonics::getDevicePitches() { return this->device_pitches; } size_t RealCubeHarmonics::getDevicePitch(int device) { return this->device_pitches[device]; } void RealCubeHarmonics::initCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initRealHarmonics(lmin, lmax, normalization, streamContainer); // allocate space for device cube pointers this->device_results = new double*[this->streamContainer->getNumberOfDevices()]; this->device_pitches = new size_t[this->streamContainer->getNumberOfDevices()]; this->device_copies = new RealHarmonics*[this->streamContainer->getNumberOfDevices()]; // copy the shape this->shape[X_] = shape[X_]; this->shape[Y_] = shape[Y_]; this->shape[Z_] = shape[Z_]; // the limits of the lmax array int ilmmax = 
(this->lmax+1)*(this->lmax+1); //int ilmmin = (this->lmin)*(this->lmin); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); cudaPitchedPtr pointer; // get the portion that is handled by device with order number 'device' int device_slice_count = shape[Z_] / this->streamContainer->getNumberOfDevices() + ((shape[Z_] % this->streamContainer->getNumberOfDevices()) > device); // allocate memory for entire shape for the main pointers cudaExtent extent = make_cudaExtent(shape[X_] * sizeof(double), shape[Y_], device_slice_count * ilmmax ); cudaMalloc3D (&pointer, extent); check_cuda_errors(__FILE__, __LINE__); this->device_pitches[device] = pointer.pitch; this->device_results[device] = (double *) pointer.ptr; // allocate the device memory and copy cudaMalloc(&this->device_copies[device], sizeof(*this)); cudaMemcpy(this->device_copies[device], this, sizeof(*this), cudaMemcpyHostToDevice); check_cuda_errors(__FILE__, __LINE__); } } void RealCubeHarmonics::evaluate(Grid3D *grid, double center[3]) { check_cuda_errors(__FILE__, __LINE__); int slice_offset = 0; for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); size_t device_pitch = this->getDevicePitch(device); double *device_results = this->getDeviceResults(device); // get the portion that is handled by device with order number 'device' int device_slice_count = shape[Z_] / this->streamContainer->getNumberOfDevices() + ((shape[Z_] % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // get the portion that is handled by stream with order number 'stream' int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); // do the per-stream evaluation (NOTE: the functions hide the kernel calls to the spherical harmonics / solid harmonics) this->evaluateSingleStream(device_results, device_pitch, device, grid, center, slice_count, device_slice_count, slice_offset, this->streamContainer->getStream(device, stream)); check_cuda_errors(__FILE__, __LINE__); // add to the slice_offset slice_offset += slice_count; // add to the cube pointer device_results += device_pitch / sizeof(double) * this->shape[Y_] * slice_count; } } } /* * Note: works best if the host_results is registered/inited as pinned before using this method * * @param host_results pointer to a four dimensional array of shape (shape[X_], shape[Y_], shape[Z_], lmax) * @param host_results_shape (x, y, z, l) */ void RealCubeHarmonics::download(double *host_results, int host_results_shape[4]) { check_cuda_errors(__FILE__, __LINE__); for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { // set the correct GPU this->streamContainer->setDevice(device); // and get the corresponding cube pointer and pitch size_t device_pitch = this->getDevicePitch(device); double *device_results = this->getDeviceResults(device); // get the portion that is handled by device with order number 'device' int device_slice_count = this->shape[Z_] / this->streamContainer->getNumberOfDevices() + ((this->shape[Z_] * (this->lmax+1) % this->streamContainer->getNumberOfDevices()) > device); for (int stream = 0; stream < this->streamContainer->getStreamsPerDevice(); stream ++) { // get the portion that is handled by 
stream with order number 'stream' int slice_count = device_slice_count / this->streamContainer->getStreamsPerDevice() + ((device_slice_count % this->streamContainer->getStreamsPerDevice()) > stream); int lm_offset = 0; int lm_device_offset = 0; if (slice_count > 0) { for (int n = this->lmin*this->lmin; n < (this->lmax +1) * (this->lmax +1); n++) { cudaMemcpy3DParms memCopyParameters = {0}; memCopyParameters.dstPtr = make_cudaPitchedPtr(&host_results[lm_offset], host_results_shape[X_]*sizeof(double), host_results_shape[X_], host_results_shape[Y_]); memCopyParameters.srcPtr = make_cudaPitchedPtr(&device_results[lm_device_offset], device_pitch, shape[X_], shape[Y_]); memCopyParameters.extent = make_cudaExtent(this->shape[X_] * sizeof(double), this->shape[Y_], slice_count); memCopyParameters.kind = cudaMemcpyDeviceToHost; // copy the f1 cube to device: 3D cudaMemcpy3DAsync(&memCopyParameters, *this->streamContainer->getStream(device, stream)); check_cuda_errors(__FILE__, __LINE__); // add to the offsets caused by the l lm_offset += this->shape[X_] * this->shape[Y_] * this->shape[Z_]; lm_device_offset += device_pitch / sizeof(double) * this->shape[Y_] * device_slice_count; } // add to the result pointers host_results += slice_count * host_results_shape[X_] * host_results_shape[Y_]; device_results += device_pitch / sizeof(double) * this->shape[Y_] * slice_count; } } } } void RealCubeHarmonics::registerHostResultArray(double *host_results, int host_results_shape[4]) { // register host memory for download cudaHostRegister(host_results, host_results_shape[0]*host_results_shape[1]*host_results_shape[2]*host_results_shape[3] * sizeof(double), cudaHostRegisterPortable); check_cuda_errors(__FILE__, __LINE__); } void RealCubeHarmonics::unregisterHostResultArray(double *host_results) { // unregister host memory cudaHostUnregister(host_results); check_cuda_errors(__FILE__, __LINE__); } void RealCubeHarmonics::destroy() { for (int device = 0; device < this->streamContainer->getNumberOfDevices(); device ++) { cudaFree(this->device_results[device]); cudaFree(this->device_copies[device]); } delete[] this->device_results; delete[] this->device_pitches; delete[] this->device_copies; } /**************************************************** * Host RealRegularSolidCubeHarmonics functionality * ****************************************************/ RealRegularSolidCubeHarmonics::RealRegularSolidCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } void RealRegularSolidCubeHarmonics::evaluateSingleStream(double *device_results, size_t device_pitch, int device, Grid3D *grid3d, double center[3], int slice_count, int device_slice_count, int slice_offset, cudaStream_t *stream) { if (slice_count > 0) { // get the launch configuration dim3 grid, block; getCubeLaunchConfiguration(&grid, &block, this->shape, slice_count, 256); // call the kernel RealRegularSolidCubeHarmonics_evaluate_grid <<< grid, block, 0, *stream >>> ( grid3d->getShape(X_), grid3d->getShape(Y_), grid3d->getShape(Z_), grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], lmax, device_results, device_pitch, center[X_], center[Y_], center[Z_], slice_offset, slice_count, device_slice_count, device); check_cuda_errors(__FILE__, __LINE__); } } /**************************************************** * Host RealSphericalCubeHarmonics functionality * 
****************************************************/ RealSphericalCubeHarmonics::RealSphericalCubeHarmonics(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { this->initCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } void RealSphericalCubeHarmonics::evaluateSingleStream(double *device_results, size_t device_pitch, int device, Grid3D *grid3d, double center[3], int slice_count, int device_slice_count, int slice_offset, cudaStream_t *stream) { if (slice_count > 0) { // get the launch configuration dim3 grid, block; getCubeLaunchConfiguration(&grid, &block, this->shape, slice_count, 256); // call the kernel RealSphericalCubeHarmonics_evaluate_grid <<< grid, block, 0, *stream >>> ( grid3d->getShape(X_), grid3d->getShape(Y_), grid3d->getShape(Z_), grid3d->axis[X_]->device_gridpoints[device], grid3d->axis[Y_]->device_gridpoints[device], grid3d->axis[Z_]->device_gridpoints[device], this->lmin, this->lmax, this->normalization, device_results, device_pitch, center[X_], center[Y_], center[Z_], slice_offset, slice_count, device_slice_count ); check_cuda_errors(__FILE__, __LINE__); } } /******************************************************* * Fortran interfaces - RealRegularSolidCubeHarmonics * *******************************************************/ extern "C" RealRegularSolidCubeHarmonics *realregularsolidcubeharmonics_init_cuda(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { return new RealRegularSolidCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } extern "C" void realregularsolidcubeharmonics_destroy_cuda(RealRegularSolidCubeHarmonics *harmonics) { harmonics->destroy(); } extern "C" void realregularsolidcubeharmonics_evaluate_cuda(RealRegularSolidCubeHarmonics *harmonics, Grid3D *grid, double center[3]) { harmonics->evaluate(grid, center); } /**************************************************** * Fortran interfaces - RealSphericalCubeHarmonics * ****************************************************/ extern "C" RealSphericalCubeHarmonics *realsphericalcubeharmonics_init_cuda(int lmin, int lmax, int normalization, int shape[3], StreamContainer *streamContainer) { return new RealSphericalCubeHarmonics(lmin, lmax, normalization, shape, streamContainer); } extern "C" void realsphericalcubeharmonics_download_cuda(RealSphericalCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->download(host_results, host_results_shape); } extern "C" void realsphericalcubeharmonics_destroy_cuda(RealSphericalCubeHarmonics *harmonics) { harmonics->destroy(); } extern "C" void realsphericalcubeharmonics_evaluate_cuda(RealSphericalCubeHarmonics *harmonics, Grid3D *grid, double center[3]) { harmonics->evaluate(grid, center); } /**************************************************** * Fortran interfaces - RealCubeHarmonics * ****************************************************/ extern "C" void realcubeharmonics_register_result_array_cuda(RealCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->registerHostResultArray(host_results, host_results_shape); } extern "C" void realcubeharmonics_unregister_result_array_cuda(RealCubeHarmonics *harmonics, double *host_results, int host_results_shape[4]) { harmonics->unregisterHostResultArray(host_results); }
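For reference, the recurrences implemented by RealSphericalHarmonics_evaluate_point_simple and RealRegularSolidHarmonics_evaluate_point_simple above can be written compactly as follows. This is a transcription of the code, not an independent derivation; Y_{l,m} denotes the (unnormalized) real spherical harmonic evaluated at (x, y, z) with r the distance to the expansion center.

\[
Y_{l,m} = \frac{2l-1}{\sqrt{(l+m)(l-m)}}\,\frac{z}{r}\,Y_{l-1,m}
          \;-\; \sqrt{\frac{(l+m-1)(l-m-1)}{(l+m)(l-m)}}\;Y_{l-2,m},
\]
\[
Y_{l,-l} = \sqrt{\frac{2l-1}{2l}}\;\frac{y\,Y_{l-1,l-1} + x\,Y_{l-1,-(l-1)}}{r}, \qquad
Y_{l,l}  = \sqrt{\frac{2l-1}{2l}}\;\frac{x\,Y_{l-1,l-1} - y\,Y_{l-1,-(l-1)}}{r}.
\]

The regular-solid-harmonic variant uses the same recurrences with the divisions by r dropped and the Y_{l-2,m} term multiplied by r^2 = x^2 + y^2 + z^2. When normalization == 2, every value is additionally scaled by \sqrt{(2l+1)/(4\pi)}.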
4a9c82f52d1add4164e718acdb06d856fa52d935.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Unfortunately these have to be copied into the individual source files * since pycuda doesn't seem to handle includes very well. */ extern const int B; extern const int K; /** * Helper function to sum across a block. * Assume pS_data is already in shared memory * Only the first thread returns a value in pSum */ __device__ void reduceBlock( float pSdata[B], float* pSum, int op ) { int idx = threadIdx.x * blockDim.y + threadIdx.y; // Sync all threads across the block __syncthreads(); // Calculate the minimum value by doing a reduction int half = (blockDim.x*blockDim.y) / 2; if( idx < half ) { while( half > 0 ) { if(idx < half) { switch(op) { case OP_SUM: pSdata[idx] = pSdata[idx] + pSdata[idx + half]; break; case OP_MULT: pSdata[idx] = pSdata[idx] * pSdata[idx + half]; break; default: // default to the identity // TODO: throw error? pSdata[idx] = pSdata[idx]; break; } } half = half / 2; __syncthreads(); } } // Store the minimum value back to global memory if (idx == 0) { pSum[0] = pSdata[0]; } } /* * Sample a Gamma RV using the Marsaglia Tsang algorithm. This * is much faster than algorithms based on Mersenne twister used * by Numpy. We do have some overhead from generating extra unif * and normal RVs that are just rejected. * Our assumption is that W.H.P. we will successfully generate * a RV on at least one of the 1024 threads per block. * * The vanilla Marsaglia alg requires alpha > 1.0 * pU is a pointer to an array of uniform random variates, * one for each thread. pN similarly points to normal * random variates. */ __global__ void sampleGammaRV(float* pU, float* pN, float* pAlpha, float* pBeta, float* pG, int* pStatus) { int x = threadIdx.x; int ki = blockIdx.x; int kj = blockIdx.y; int k_ind = ki*gridDim.y + kj; float u = pU[k_ind*blockDim.x + x]; float n = pN[k_ind*blockDim.x + x]; __shared__ float gamma[B]; __shared__ bool accept[B]; accept[x] = false; float a = pAlpha[k_ind]; float b = pBeta[k_ind]; if (a < 1.0) { if (x==0) { pStatus[k_ind] = ERROR_INVALID_PARAMETER; } return; } float d = a-1.0/3.0; float c = 1.0/sqrtf(9.0*d); float v = powf(1+c*n,3); // if v <= 0 this result is invalid if (v<=0) { accept[x] = false; } else if (u <=(1-0.0331*powf(n,4.0)) || (logf(u)<0.5*powf(n,2.0)+d*(1-v+logf(v)))) { // rejection sample. The second operation should be // performed with low probability. This is the "squeeze" gamma[x] = d*v; accept[x] = true; } // Reduce across block to find the first accepted sample __syncthreads(); int half = blockDim.x / 2; if( x < half ) { while( half > 0 ) { if(x < half) { // if the latter variate was accepted but the current // was not, copy the latter to the current. If the current // was accepted we keep it. If neither was accepted we // don't change anything. if (!accept[x] && accept[x+half]) { gamma[x] = gamma[x+half]; accept[x] = true; } } half = half / 2; __syncthreads(); } } // Store the sample to global memory, or return error if failure if (x == 0) { if (accept[0]) { // rescale the result (assume rate characterization) pG[k_ind] = gamma[0]/b; pStatus[k_ind] = ERROR_SUCCESS; } else { pStatus[k_ind] = ERROR_SAMPLE_FAILURE; } } }
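The acceptance test in sampleGammaRV above is the Marsaglia–Tsang scheme. As implemented, for shape \alpha \ge 1 and rate \beta, each thread draws n \sim \mathcal{N}(0,1) and u \sim \mathcal{U}(0,1) and sets

\[
d = \alpha - \tfrac{1}{3}, \qquad c = \frac{1}{\sqrt{9d}}, \qquad v = (1 + c\,n)^3,
\]

accepting whenever v > 0 and either u \le 1 - 0.0331\,n^4 (the cheap "squeeze") or \ln u < \tfrac{1}{2}n^2 + d\,(1 - v + \ln v), in which case the sample is \gamma = d\,v/\beta. The block-wide reduction then keeps the first accepted proposal, so the kernel reports ERROR_SAMPLE_FAILURE only if all B threads reject.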
4a9c82f52d1add4164e718acdb06d856fa52d935.cu
/** * Unfortunately these have to be copied into the individual source files * since pycuda doesn't seem to handle includes very well. */ extern const int B; extern const int K; /** * Helper function to sum across a block. * Assume pS_data is already in shared memory * Only the first thread returns a value in pSum */ __device__ void reduceBlock( float pSdata[B], float* pSum, int op ) { int idx = threadIdx.x * blockDim.y + threadIdx.y; // Sync all threads across the block __syncthreads(); // Calculate the minimum value by doing a reduction int half = (blockDim.x*blockDim.y) / 2; if( idx < half ) { while( half > 0 ) { if(idx < half) { switch(op) { case OP_SUM: pSdata[idx] = pSdata[idx] + pSdata[idx + half]; break; case OP_MULT: pSdata[idx] = pSdata[idx] * pSdata[idx + half]; break; default: // default to the identity // TODO: throw error? pSdata[idx] = pSdata[idx]; break; } } half = half / 2; __syncthreads(); } } // Store the minimum value back to global memory if (idx == 0) { pSum[0] = pSdata[0]; } } /* * Sample a Gamma RV using the Marsaglia Tsang algorithm. This * is much faster than algorithms based on Mersenne twister used * by Numpy. We do have some overhead from generating extra unif * and normal RVs that are just rejected. * Our assumption is that W.H.P. we will successfully generate * a RV on at least one of the 1024 threads per block. * * The vanilla Marsaglia alg requires alpha > 1.0 * pU is a pointer to an array of uniform random variates, * one for each thread. pN similarly points to normal * random variates. */ __global__ void sampleGammaRV(float* pU, float* pN, float* pAlpha, float* pBeta, float* pG, int* pStatus) { int x = threadIdx.x; int ki = blockIdx.x; int kj = blockIdx.y; int k_ind = ki*gridDim.y + kj; float u = pU[k_ind*blockDim.x + x]; float n = pN[k_ind*blockDim.x + x]; __shared__ float gamma[B]; __shared__ bool accept[B]; accept[x] = false; float a = pAlpha[k_ind]; float b = pBeta[k_ind]; if (a < 1.0) { if (x==0) { pStatus[k_ind] = ERROR_INVALID_PARAMETER; } return; } float d = a-1.0/3.0; float c = 1.0/sqrtf(9.0*d); float v = powf(1+c*n,3); // if v <= 0 this result is invalid if (v<=0) { accept[x] = false; } else if (u <=(1-0.0331*powf(n,4.0)) || (logf(u)<0.5*powf(n,2.0)+d*(1-v+logf(v)))) { // rejection sample. The second operation should be // performed with low probability. This is the "squeeze" gamma[x] = d*v; accept[x] = true; } // Reduce across block to find the first accepted sample __syncthreads(); int half = blockDim.x / 2; if( x < half ) { while( half > 0 ) { if(x < half) { // if the latter variate was accepted but the current // was not, copy the latter to the current. If the current // was accepted we keep it. If neither was accepted we // don't change anything. if (!accept[x] && accept[x+half]) { gamma[x] = gamma[x+half]; accept[x] = true; } } half = half / 2; __syncthreads(); } } // Store the sample to global memory, or return error if failure if (x == 0) { if (accept[0]) { // rescale the result (assume rate characterization) pG[k_ind] = gamma[0]/b; pStatus[k_ind] = ERROR_SUCCESS; } else { pStatus[k_ind] = ERROR_SAMPLE_FAILURE; } } }
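As a cross-check for the kernel above, here is a minimal host-side reference sampler using the same Marsaglia–Tsang acceptance rule. It is a sketch, not part of the original sources; the function and variable names are illustrative, and it loops until acceptance instead of making one attempt per thread.

// Host-side reference sampler (sketch): same acceptance rule as sampleGammaRV,
// valid for alpha >= 1, rate parameterization beta.
#include <cmath>
#include <random>

double sample_gamma_reference(double alpha, double beta, std::mt19937 &rng) {
    std::normal_distribution<double> normal(0.0, 1.0);
    std::uniform_real_distribution<double> uniform(0.0, 1.0);
    const double d = alpha - 1.0 / 3.0;
    const double c = 1.0 / std::sqrt(9.0 * d);
    for (;;) {                         // retry until an accepted proposal
        double n = normal(rng);
        double v = std::pow(1.0 + c * n, 3);
        if (v <= 0.0) continue;        // invalid proposal, reject
        double u = uniform(rng);
        if (u <= 1.0 - 0.0331 * std::pow(n, 4) ||
            std::log(u) < 0.5 * n * n + d * (1.0 - v + std::log(v))) {
            return d * v / beta;       // rescale by the rate parameter
        }
    }
}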
45fa28824151fa43272f570226b63d988c127940.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/fluid/operators/sgd_op.h"
#include "paddle/fluid/platform/cuda_helper.h"

namespace paddle {
namespace operators {

namespace {

template <typename T>
__global__ void SGDKernel(const T* g, const T* p, const T* learning_rate,
                          const int num, T* p_out) {
  T lr = learning_rate[0];
  int grid_size = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += grid_size) {
    T g_data = g[i];
    T p_data = p[i];
    p_out[i] = p_data - lr * g_data;
  }
}

template <typename T, int block_size>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
                                       const int64_t* rows,
                                       const T* learning_rate, T* tensor_out,
                                       int64_t row_numel) {
  const int ty = blockIdx.y;
  int tid = threadIdx.x;
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;

  for (int index = tid; index < row_numel; index += block_size) {
    // Since index in rows of SelectedRows can be duplicate, we have to use
    // Atomic Operation to avoid concurrent write error.
    paddle::platform::CudaAtomicAdd(
        tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
  }
}
}  // namespace

template <typename T>
class SGDOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* param = ctx.Input<framework::Tensor>("Param");
    auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
    auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");

    auto* grad_var = ctx.InputVar("Grad");
    // Actually, all tensors are LoDTensor except SelectedRows.
    if (grad_var->IsType<framework::LoDTensor>()) {
      param_out->mutable_data<T>(ctx.GetPlace());
      auto* grad = ctx.Input<framework::Tensor>("Grad");
      auto* grad_data = grad->data<T>();
      auto* param_data = param->data<T>();
      auto* param_out_data = param_out->data<T>();

      int block = 512;
      int grid = (param->numel() + block - 1) / block;

      hipLaunchKernelGGL(( SGDKernel<T>), dim3(grid), dim3(block), 0,
                         ctx.cuda_device_context().stream(),
          grad_data, param_data, learning_rate->data<T>(), param->numel(),
          param_out_data);

    } else if (grad_var->IsType<framework::SelectedRows>()) {
      // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
      // This manual optimization brings difficulty to track data dependency.
      // It's better to find a more elegant solution.
      PADDLE_ENFORCE_EQ(param, param_out);
      auto* grad = ctx.Input<framework::SelectedRows>("Grad");

      auto in_height = grad->height();
      auto out_dims = param_out->dims();
      PADDLE_ENFORCE_EQ(in_height, out_dims[0]);

      auto& in_value = grad->value();
      framework::Vector<int64_t> in_rows(grad->rows());

      int64_t in_row_numel = in_value.numel() / in_rows.size();
      PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height);

      auto* in_data = in_value.data<T>();
      auto* out_data = param_out->data<T>();

      const int block_size = 256;
      dim3 threads(block_size, 1);
      dim3 grid(1, in_rows.size());
      hipLaunchKernelGGL(( SparseSGDFunctorKernel<T, 256>), dim3(grid), dim3(threads), 0,
                         ctx.cuda_device_context().stream(),
          in_data, in_rows.CUDAData(ctx.GetPlace()), learning_rate->data<T>(),
          out_data, in_row_numel);
    } else {
      PADDLE_THROW("Unsupported Variable Type of Grad");
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sgd, ops::SGDOpCUDAKernel<float>,
                        ops::SGDOpCUDAKernel<double>);
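In equation form, the two code paths above implement the plain update \(\theta \leftarrow \theta - \eta\,g\) for dense (LoDTensor) gradients, and, for SelectedRows gradients, the row-wise update \(\theta_{r_j,:} \leftarrow \theta_{r_j,:} - \eta\,g_{j,:}\) for every selected row index \(r_j\), applied with atomic adds because the same row index may appear more than once.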
45fa28824151fa43272f570226b63d988c127940.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#define EIGEN_USE_GPU
#include "paddle/fluid/operators/sgd_op.h"
#include "paddle/fluid/platform/cuda_helper.h"

namespace paddle {
namespace operators {

namespace {

template <typename T>
__global__ void SGDKernel(const T* g, const T* p, const T* learning_rate,
                          const int num, T* p_out) {
  T lr = learning_rate[0];
  int grid_size = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; i += grid_size) {
    T g_data = g[i];
    T p_data = p[i];
    p_out[i] = p_data - lr * g_data;
  }
}

template <typename T, int block_size>
__global__ void SparseSGDFunctorKernel(const T* selected_rows,
                                       const int64_t* rows,
                                       const T* learning_rate, T* tensor_out,
                                       int64_t row_numel) {
  const int ty = blockIdx.y;
  int tid = threadIdx.x;
  selected_rows += ty * row_numel;
  tensor_out += rows[ty] * row_numel;

  for (int index = tid; index < row_numel; index += block_size) {
    // Since index in rows of SelectedRows can be duplicate, we have to use
    // Atomic Operation to avoid concurrent write error.
    paddle::platform::CudaAtomicAdd(
        tensor_out + index, -1.0 * learning_rate[0] * selected_rows[index]);
  }
}
}  // namespace

template <typename T>
class SGDOpCUDAKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* param = ctx.Input<framework::Tensor>("Param");
    auto* param_out = ctx.Output<framework::Tensor>("ParamOut");
    auto* learning_rate = ctx.Input<framework::Tensor>("LearningRate");

    auto* grad_var = ctx.InputVar("Grad");
    // Actually, all tensors are LoDTensor except SelectedRows.
    if (grad_var->IsType<framework::LoDTensor>()) {
      param_out->mutable_data<T>(ctx.GetPlace());
      auto* grad = ctx.Input<framework::Tensor>("Grad");
      auto* grad_data = grad->data<T>();
      auto* param_data = param->data<T>();
      auto* param_out_data = param_out->data<T>();

      int block = 512;
      int grid = (param->numel() + block - 1) / block;

      SGDKernel<T><<<grid, block, 0, ctx.cuda_device_context().stream()>>>(
          grad_data, param_data, learning_rate->data<T>(), param->numel(),
          param_out_data);

    } else if (grad_var->IsType<framework::SelectedRows>()) {
      // TODO(qijun): In Sparse SGD operator, in-place update is enforced.
      // This manual optimization brings difficulty to track data dependency.
      // It's better to find a more elegant solution.
      PADDLE_ENFORCE_EQ(param, param_out);
      auto* grad = ctx.Input<framework::SelectedRows>("Grad");

      auto in_height = grad->height();
      auto out_dims = param_out->dims();
      PADDLE_ENFORCE_EQ(in_height, out_dims[0]);

      auto& in_value = grad->value();
      framework::Vector<int64_t> in_rows(grad->rows());

      int64_t in_row_numel = in_value.numel() / in_rows.size();
      PADDLE_ENFORCE_EQ(in_row_numel, param_out->numel() / in_height);

      auto* in_data = in_value.data<T>();
      auto* out_data = param_out->data<T>();

      const int block_size = 256;
      dim3 threads(block_size, 1);
      dim3 grid(1, in_rows.size());
      SparseSGDFunctorKernel<
          T, 256><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>(
          in_data, in_rows.CUDAData(ctx.GetPlace()), learning_rate->data<T>(),
          out_data, in_row_numel);
    } else {
      PADDLE_THROW("Unsupported Variable Type of Grad");
    }
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(sgd, ops::SGDOpCUDAKernel<float>,
                        ops::SGDOpCUDAKernel<double>);
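For experimentation outside the framework, the dense path above can be reproduced with a small standalone program. This is a sketch only: the kernel mirrors SGDKernel's grid-stride update, but none of the Paddle plumbing (Tensor, ExecutionContext, SelectedRows) is used.

// Standalone sketch (not part of the operator): p_out[i] = p[i] - lr * g[i].
#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

__global__ void sgd_dense(const float *g, const float *p, const float *lr,
                          int n, float *p_out) {
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
        p_out[i] = p[i] - lr[0] * g[i];
}

int main() {
    const int n = 1 << 20;
    std::vector<float> h_p(n, 1.0f), h_g(n, 0.5f), h_out(n);
    float h_lr = 0.1f;
    float *d_p, *d_g, *d_lr, *d_out;
    cudaMalloc(&d_p, n * sizeof(float));
    cudaMalloc(&d_g, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMalloc(&d_lr, sizeof(float));
    cudaMemcpy(d_p, h_p.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_g, h_g.data(), n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_lr, &h_lr, sizeof(float), cudaMemcpyHostToDevice);
    int block = 512, grid = (n + block - 1) / block;   // same launch shape as the operator
    sgd_dense<<<grid, block>>>(d_g, d_p, d_lr, n, d_out);
    cudaMemcpy(h_out.data(), d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("p_out[0] = %f\n", h_out[0]);               // expected 0.95 = 1.0 - 0.1 * 0.5
    cudaFree(d_p); cudaFree(d_g); cudaFree(d_out); cudaFree(d_lr);
    return 0;
}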
23923daa3bb3c32783d920f9cc3e16d9b09111d6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by Yurii Shyrma on 02.01.2018
//

#include <ops/declarable/helpers/stack.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <cuda_exception.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>

namespace nd4j {
namespace ops {
namespace helpers {

    template <typename T>
    static __global__ void stackKernel(void** inputList, void** inputShapeList, int inputListLength, Nd4jLong arrLen, void* outputBuffer, Nd4jLong* tadShape, Nd4jLong *tadOffsets) { //, Nd4jLong* tadShape, Nd4jLong* tadOffsets) {

        __shared__ int arrIdx, blocksPerArr;
        __shared__ T *z;
        __shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLenPerBlock, start, end, offsetZ, zLength;

        if (threadIdx.x == 0) {
            z = reinterpret_cast<T*>(outputBuffer);
        }
        __syncthreads();

        for (int t = blockIdx.x; t < inputListLength; t += gridDim.x) {
            auto tZ = z + tadOffsets[t];
            auto tX = reinterpret_cast<T*>(inputList[t]);
            auto xShape = reinterpret_cast<Nd4jLong*>(inputShapeList[t]);

            for (int e = threadIdx.x; e < arrLen; e += blockDim.x) {
                tZ[shape::getIndexOffset(e, tadShape, arrLen)] = tX[shape::getIndexOffset(e, xShape, arrLen)];
            }
        }
    }

    ///////////////////////////////////////////////////////////////////
    template <typename T>
    static void stack_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim) {

        if(inArrs[0]->isScalar()) {
            //#pragma omp parallel for
            for (size_t i = 0; i < inArrs.size(); ++i) {
                inArrs[i]->syncToHost();
                outArr->p(i, inArrs[i]->e<T>(0));
            }
            outArr->syncToDevice();
        }
        else {
            //Nd4jLong **dInShapeInfo;
            //void **dInBuffers;
            std::vector<void const*> inputList(inArrs.size());
            std::vector<Nd4jLong const*> inputShapeList(inArrs.size());
            auto stream = context->getCudaStream();

            for (size_t i = 0; i < inputList.size(); ++i) {
                inputList[i] = inArrs[i]->getSpecialBuffer();
                inputShapeList[i] = inArrs[i]->getSpecialShapeInfo();
            }

            std::vector<int> axis = ShapeUtils::evalDimsToExclude(outArr->rankOf(), {dim});

            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(outArr->getShapeInfo(), axis);
            PointersManager manager(context, "helpers::stack");

            auto dInBuffers = (void **) manager.replicatePointer(inputList.data(), inputList.size() * sizeof(Nd4jLong*));
            auto dInShapeInfo = (void **) manager.replicatePointer(inputShapeList.data(), inputShapeList.size() * sizeof(Nd4jLong*));

            dim3 launchDims(inArrs.size(), inArrs[0]->lengthOf(), 1024);
            hipLaunchKernelGGL(( stackKernel<T>), dim3(launchDims.x), dim3(launchDims.y), launchDims.z, *stream, (void**)dInBuffers, (void**)dInShapeInfo, inputList.size(), inArrs[0]->lengthOf(), outArr->specialBuffer(), packX.specialShapeInfo(), packX.specialOffsets()); //, dTadShape, dTadOffsets);
            manager.synchronize();
        }
    }

    void stack(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim) {
        BUILD_SINGLE_SELECTOR(outArr->dataType(), stack_, (context, inArrs, outArr, dim), LIBND4J_TYPES);
    }

    BUILD_SINGLE_TEMPLATE(template void stack_ , (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim), LIBND4J_TYPES);

}
}
}
23923daa3bb3c32783d920f9cc3e16d9b09111d6.cu
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

//
// Created by Yurii Shyrma on 02.01.2018
//

#include <ops/declarable/helpers/stack.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <cuda_exception.h>
#include <TAD.h>
#include <PointersManager.h>
#include <ConstantTadHelper.h>

namespace nd4j {
namespace ops {
namespace helpers {

    template <typename T>
    static __global__ void stackKernel(void** inputList, void** inputShapeList, int inputListLength, Nd4jLong arrLen, void* outputBuffer, Nd4jLong* tadShape, Nd4jLong *tadOffsets) { //, Nd4jLong* tadShape, Nd4jLong* tadOffsets) {

        __shared__ int arrIdx, blocksPerArr;
        __shared__ T *z;
        __shared__ Nd4jLong *zShapeInfo, *xShapeInfo, arrLenPerBlock, start, end, offsetZ, zLength;

        if (threadIdx.x == 0) {
            z = reinterpret_cast<T*>(outputBuffer);
        }
        __syncthreads();

        for (int t = blockIdx.x; t < inputListLength; t += gridDim.x) {
            auto tZ = z + tadOffsets[t];
            auto tX = reinterpret_cast<T*>(inputList[t]);
            auto xShape = reinterpret_cast<Nd4jLong*>(inputShapeList[t]);

            for (int e = threadIdx.x; e < arrLen; e += blockDim.x) {
                tZ[shape::getIndexOffset(e, tadShape, arrLen)] = tX[shape::getIndexOffset(e, xShape, arrLen)];
            }
        }
    }

    ///////////////////////////////////////////////////////////////////
    template <typename T>
    static void stack_(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim) {

        if(inArrs[0]->isScalar()) {
            //#pragma omp parallel for
            for (size_t i = 0; i < inArrs.size(); ++i) {
                inArrs[i]->syncToHost();
                outArr->p(i, inArrs[i]->e<T>(0));
            }
            outArr->syncToDevice();
        }
        else {
            //Nd4jLong **dInShapeInfo;
            //void **dInBuffers;
            std::vector<void const*> inputList(inArrs.size());
            std::vector<Nd4jLong const*> inputShapeList(inArrs.size());
            auto stream = context->getCudaStream();

            for (size_t i = 0; i < inputList.size(); ++i) {
                inputList[i] = inArrs[i]->getSpecialBuffer();
                inputShapeList[i] = inArrs[i]->getSpecialShapeInfo();
            }

            std::vector<int> axis = ShapeUtils::evalDimsToExclude(outArr->rankOf(), {dim});

            auto packX = nd4j::ConstantTadHelper::getInstance()->tadForDimensions(outArr->getShapeInfo(), axis);
            PointersManager manager(context, "helpers::stack");

            auto dInBuffers = (void **) manager.replicatePointer(inputList.data(), inputList.size() * sizeof(Nd4jLong*));
            auto dInShapeInfo = (void **) manager.replicatePointer(inputShapeList.data(), inputShapeList.size() * sizeof(Nd4jLong*));

            dim3 launchDims(inArrs.size(), inArrs[0]->lengthOf(), 1024);
            stackKernel<T><<<launchDims.x, launchDims.y, launchDims.z, *stream>>>((void**)dInBuffers, (void**)dInShapeInfo, inputList.size(), inArrs[0]->lengthOf(), outArr->specialBuffer(), packX.specialShapeInfo(), packX.specialOffsets()); //, dTadShape, dTadOffsets);
            manager.synchronize();
        }
    }

    void stack(nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim) {
        BUILD_SINGLE_SELECTOR(outArr->dataType(), stack_, (context, inArrs, outArr, dim), LIBND4J_TYPES);
    }

    BUILD_SINGLE_TEMPLATE(template void stack_ , (nd4j::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray* outArr, const int dim), LIBND4J_TYPES);

}
}
}
ad947799acaaa6db8de3df68bc391711fd1e9f87.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Author: Prasun Gera [email protected] #include <cugraph.h> #include <rmm_utils.h> #include <algorithm> #include "traversal_common.cuh" #include "sssp.cuh" #include "sssp_kernels_hip.cuh" #include "utilities/error_utils.h" namespace cugraph { template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::setup() { // Working data // Each vertex can be in the frontier at most once ALLOC_TRY(&frontier, n * sizeof(IndexType), nullptr); ALLOC_TRY(&new_frontier, n * sizeof(IndexType), nullptr); // size of bitmaps for vertices vertices_bmap_size = (n / (8 * sizeof(int)) + 1); // size of bitmaps for edges edges_bmap_size = (nnz / (8 * sizeof(int)) + 1); // ith bit of isolated_bmap is set <=> degree of ith vertex = 0 ALLOC_TRY(&isolated_bmap, sizeof(int) * vertices_bmap_size, nullptr); // Allocate buffer for data that need to be reset every iteration iter_buffer_size = sizeof(int) * (edges_bmap_size + vertices_bmap_size) + sizeof(IndexType); ALLOC_TRY(&iter_buffer, iter_buffer_size, nullptr); // ith bit of relaxed_edges_bmap <=> ith edge was relaxed relaxed_edges_bmap = (int*)iter_buffer; // ith bit of next_frontier_bmap <=> vertex is active in the next frontier next_frontier_bmap = (int*)iter_buffer + edges_bmap_size; // num vertices in the next frontier d_new_frontier_cnt = next_frontier_bmap + vertices_bmap_size; // vertices_degree[i] = degree of vertex i ALLOC_TRY(&vertex_degree, sizeof(IndexType) * n, nullptr); // Cub working data traversal::cub_exclusive_sum_alloc( n + 1, d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes); // frontier_vertex_degree[i] is the degree of vertex frontier[i] ALLOC_TRY(&frontier_vertex_degree, n * sizeof(IndexType), nullptr); // exclusive sum of frontier_vertex_degree ALLOC_TRY(&exclusive_sum_frontier_vertex_degree, (n + 1) * sizeof(IndexType), nullptr); // We use buckets of edges (32 edges per bucket for now, see exact macro in // sssp_kernels). 
frontier_vertex_degree_buckets_offsets[i] is the index k // such as frontier[k] is the source of the first edge of the bucket // See top down kernels for more details size_t bucket_off_size = ((nnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2) * sizeof(IndexType); ALLOC_TRY(&exclusive_sum_frontier_vertex_buckets_offsets, bucket_off_size, nullptr); // Repurpose d_new_frontier_cnt temporarily IndexType* d_nisolated = d_new_frontier_cnt; hipMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream); // Computing isolated_bmap // Only dependent on graph - not source vertex - done once traversal::flag_isolated_vertices( n, isolated_bmap, row_offsets, vertex_degree, d_nisolated, stream); hipMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We need nisolated to be ready to use // nisolated is the number of isolated (zero out-degree) vertices hipStreamSynchronize(stream); } template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::configure(DistType* _distances, IndexType* _predecessors, int* _edge_mask) { distances = _distances; predecessors = _predecessors; edge_mask = _edge_mask; useEdgeMask = (edge_mask != NULL); computeDistances = (distances != NULL); computePredecessors = (predecessors != NULL); // We need distances for SSSP even if the caller doesn't need them if (!computeDistances) ALLOC_TRY(&distances, n * sizeof(DistType), nullptr); // Need next_distances in either case ALLOC_TRY(&next_distances, n * sizeof(DistType), nullptr); } template <typename IndexType, typename DistType> gdf_error SSSP<IndexType, DistType>::traverse(IndexType source_vertex) { // Init distances to infinities traversal::fill_vec(distances, n, traversal::vec_t<DistType>::max, stream); traversal::fill_vec( next_distances, n, traversal::vec_t<DistType>::max, stream); // If needed, set all predecessors to non-existent (-1) if (computePredecessors) { hipMemsetAsync(predecessors, -1, n * sizeof(IndexType), stream); } // // Initial frontier // hipMemsetAsync(&distances[source_vertex], 0, sizeof(DistType), stream); hipMemsetAsync(&next_distances[source_vertex], 0, sizeof(DistType), stream); int current_isolated_bmap_source_vert = 0; hipMemcpyAsync(&current_isolated_bmap_source_vert, &isolated_bmap[source_vertex / INT_SIZE], sizeof(int), hipMemcpyDeviceToHost); // We need current_isolated_bmap_source_vert hipStreamSynchronize(stream); int m = (1 << (source_vertex % INT_SIZE)); // If source is isolated (zero outdegree), we are done if ((m & current_isolated_bmap_source_vert)) { // Init distances and predecessors are done; stream is synchronized return GDF_SUCCESS; } // Adding source_vertex to init frontier hipMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), hipMemcpyHostToDevice, stream); // Number of vertices in the frontier and number of out-edges from the // frontier IndexType mf, nf; nf = 1; int iters = 0; while (nf > 0) { // Typical pre-top down workflow. 
set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree, nf, stream); traversal::exclusive_sum(d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes, frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); hipMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), hipMemcpyDeviceToHost, stream); // We need mf to know the next kernel's launch dims hipStreamSynchronize(stream); traversal::compute_bucket_offsets( exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, nf, mf, stream); // Reset the transient structures to 0 hipMemsetAsync(iter_buffer, 0, iter_buffer_size, stream); sssp_kernels::frontier_expand( row_offsets, col_indices, edge_weights, frontier, nf, mf, new_frontier, d_new_frontier_cnt, exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, distances, next_distances, predecessors, edge_mask, next_frontier_bmap, relaxed_edges_bmap, isolated_bmap, stream); hipMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), hipMemcpyDeviceToHost, stream); // Copy next_distances to distances hipMemcpyAsync(distances, next_distances, n * sizeof(DistType), hipMemcpyDeviceToDevice, stream); cudaCheckError(); // We need nf for the loop hipStreamSynchronize(stream); // Swap frontiers IndexType* tmp = frontier; frontier = new_frontier; new_frontier = tmp; iters++; if (iters > n) { // Bail out. Got a graph with a negative cycle std::cerr << "ERROR: Max iterations exceeded. Check the graph for " "negative weight cycles\n"; return GDF_INVALID_API_CALL; } } return GDF_SUCCESS; } template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::clean() { // the vectors have a destructor that takes care of cleaning ALLOC_FREE_TRY(frontier, nullptr); ALLOC_FREE_TRY(new_frontier, nullptr); ALLOC_FREE_TRY(isolated_bmap, nullptr); ALLOC_FREE_TRY(vertex_degree, nullptr); ALLOC_FREE_TRY(d_cub_exclusive_sum_storage, nullptr); ALLOC_FREE_TRY(frontier_vertex_degree, nullptr); ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_degree, nullptr); ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_buckets_offsets, nullptr); ALLOC_FREE_TRY(iter_buffer, nullptr); // Distances were working data if (!computeDistances) ALLOC_FREE_TRY(distances, nullptr); // next_distances were working data ALLOC_FREE_TRY(next_distances, nullptr); } } // end namespace cugraph /** * ---------------------------------------------------------------------------* * @brief Native sssp with predecessors * * @file sssp.cu * --------------------------------------------------------------------------*/ gdf_error gdf_sssp(gdf_graph* gdf_G, gdf_column* sssp_distances, gdf_column* predecessors, const int source_vert) { GDF_REQUIRE(gdf_G, GDF_INVALID_API_CALL); GDF_REQUIRE(gdf_G->adjList || gdf_G->edgeList, GDF_INVALID_API_CALL); GDF_REQUIRE(source_vert >= 0, GDF_INVALID_API_CALL); void *sssp_dist_ptr, *pred_ptr; // NOTE: gdf_column struct doesn't have a default constructor. So we can get // garbage values for member fields. Right now, it's the caller's // responsibility to ensure that the fields are initialised if the gdf_column // ptr is not null sssp_dist_ptr = (sssp_distances && sssp_distances->size) ? sssp_distances->data : nullptr; pred_ptr = (predecessors && predecessors->size) ? 
predecessors->data : nullptr; GDF_REQUIRE(sssp_dist_ptr || pred_ptr, GDF_INVALID_API_CALL); if (sssp_dist_ptr) { GDF_REQUIRE(!sssp_distances->valid, GDF_VALIDITY_UNSUPPORTED); // Integral types are possible, but we don't want to deal with overflow // conditions right now GDF_REQUIRE(sssp_distances->dtype == GDF_FLOAT32 || sssp_distances->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); } gdf_error err = gdf_add_adj_list(gdf_G); if (err != GDF_SUCCESS) return err; GDF_REQUIRE(gdf_G->adjList->offsets->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(gdf_G->adjList->indices->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(source_vert < gdf_G->adjList->offsets->size - 1, GDF_INVALID_API_CALL); if (pred_ptr) GDF_REQUIRE(predecessors->dtype == gdf_G->adjList->indices->dtype, GDF_UNSUPPORTED_DTYPE); if (sssp_dist_ptr) GDF_REQUIRE(gdf_G->adjList->offsets->size - 1 <= sssp_distances->size, GDF_INVALID_API_CALL); if (!gdf_G->adjList->edge_data) { // Generate unit weights // TODO: This should fallback to BFS, but for now it'll go through the // SSSP path since BFS needs the directed flag, which should not be // necessary for the SSSP API. We can pass directed to the BFS call, but // BFS also does only integer distances right now whereas we need float or // double void* d_edge_data; gdf_G->adjList->edge_data = new gdf_column; hipStream_t stream{nullptr}; // If distances array is given and is double, generate the weights in // double if (sssp_dist_ptr && sssp_distances->dtype == GDF_FLOAT64) { std::vector<double> h_edge_data(gdf_G->adjList->indices->size, 1.0); size_t edge_data_size = sizeof(double) * h_edge_data.size(); ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream); CUDA_TRY(hipMemcpy(d_edge_data, &h_edge_data[0], edge_data_size, hipMemcpyHostToDevice)); gdf_column_view(gdf_G->adjList->edge_data, d_edge_data, nullptr, gdf_G->adjList->indices->size, GDF_FLOAT64); } else { // Else generate float std::vector<float> h_edge_data(gdf_G->adjList->indices->size, 1.0); size_t edge_data_size = sizeof(float) * h_edge_data.size(); ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream); CUDA_TRY(hipMemcpy(d_edge_data, &h_edge_data[0], edge_data_size, hipMemcpyHostToDevice)); gdf_column_view(gdf_G->adjList->edge_data, d_edge_data, nullptr, gdf_G->adjList->indices->size, GDF_FLOAT32); } } else { // Got weighted graph GDF_REQUIRE( gdf_G->adjList->edge_data->size == gdf_G->adjList->indices->size, GDF_INVALID_API_CALL); GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 || gdf_G->adjList->edge_data->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); if (sssp_dist_ptr) GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == sssp_distances->dtype, GDF_UNSUPPORTED_DTYPE); // SSSP is not defined for graphs with negative weight cycles // Warn user about any negative edges if (gdf_G->prop && gdf_G->prop->has_negative_edges == GDF_PROP_TRUE) std::cerr << "WARN: The graph has negative weight edges. 
SSSP will not " "converge if the graph has negative weight cycles\n"; } int n = gdf_G->adjList->offsets->size - 1; int e = gdf_G->adjList->indices->size; int* offsets_ptr = (int*)gdf_G->adjList->offsets->data; int* indices_ptr = (int*)gdf_G->adjList->indices->data; void* edge_weights_ptr = static_cast<void*>(gdf_G->adjList->edge_data->data); gdf_error ret; if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT32) { cugraph::SSSP<int, float> sssp( n, e, offsets_ptr, indices_ptr, static_cast<float*>(edge_weights_ptr)); sssp.configure(static_cast<float*>(sssp_dist_ptr), static_cast<int*>(pred_ptr), nullptr); ret = sssp.traverse(source_vert); } else if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT64) { cugraph::SSSP<int, double> sssp(n, e, offsets_ptr, indices_ptr, static_cast<double*>(edge_weights_ptr)); sssp.configure(static_cast<double*>(sssp_dist_ptr), static_cast<int*>(pred_ptr), nullptr); ret = sssp.traverse(source_vert); } else { GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 || gdf_G->adjList->edge_data->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); } return ret; }
ad947799acaaa6db8de3df68bc391711fd1e9f87.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Author: Prasun Gera [email protected] #include <cugraph.h> #include <rmm_utils.h> #include <algorithm> #include "traversal_common.cuh" #include "sssp.cuh" #include "sssp_kernels.cuh" #include "utilities/error_utils.h" namespace cugraph { template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::setup() { // Working data // Each vertex can be in the frontier at most once ALLOC_TRY(&frontier, n * sizeof(IndexType), nullptr); ALLOC_TRY(&new_frontier, n * sizeof(IndexType), nullptr); // size of bitmaps for vertices vertices_bmap_size = (n / (8 * sizeof(int)) + 1); // size of bitmaps for edges edges_bmap_size = (nnz / (8 * sizeof(int)) + 1); // ith bit of isolated_bmap is set <=> degree of ith vertex = 0 ALLOC_TRY(&isolated_bmap, sizeof(int) * vertices_bmap_size, nullptr); // Allocate buffer for data that need to be reset every iteration iter_buffer_size = sizeof(int) * (edges_bmap_size + vertices_bmap_size) + sizeof(IndexType); ALLOC_TRY(&iter_buffer, iter_buffer_size, nullptr); // ith bit of relaxed_edges_bmap <=> ith edge was relaxed relaxed_edges_bmap = (int*)iter_buffer; // ith bit of next_frontier_bmap <=> vertex is active in the next frontier next_frontier_bmap = (int*)iter_buffer + edges_bmap_size; // num vertices in the next frontier d_new_frontier_cnt = next_frontier_bmap + vertices_bmap_size; // vertices_degree[i] = degree of vertex i ALLOC_TRY(&vertex_degree, sizeof(IndexType) * n, nullptr); // Cub working data traversal::cub_exclusive_sum_alloc( n + 1, d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes); // frontier_vertex_degree[i] is the degree of vertex frontier[i] ALLOC_TRY(&frontier_vertex_degree, n * sizeof(IndexType), nullptr); // exclusive sum of frontier_vertex_degree ALLOC_TRY(&exclusive_sum_frontier_vertex_degree, (n + 1) * sizeof(IndexType), nullptr); // We use buckets of edges (32 edges per bucket for now, see exact macro in // sssp_kernels). 
frontier_vertex_degree_buckets_offsets[i] is the index k // such as frontier[k] is the source of the first edge of the bucket // See top down kernels for more details size_t bucket_off_size = ((nnz / TOP_DOWN_EXPAND_DIMX + 1) * NBUCKETS_PER_BLOCK + 2) * sizeof(IndexType); ALLOC_TRY(&exclusive_sum_frontier_vertex_buckets_offsets, bucket_off_size, nullptr); // Repurpose d_new_frontier_cnt temporarily IndexType* d_nisolated = d_new_frontier_cnt; cudaMemsetAsync(d_nisolated, 0, sizeof(IndexType), stream); // Computing isolated_bmap // Only dependent on graph - not source vertex - done once traversal::flag_isolated_vertices( n, isolated_bmap, row_offsets, vertex_degree, d_nisolated, stream); cudaMemcpyAsync(&nisolated, d_nisolated, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We need nisolated to be ready to use // nisolated is the number of isolated (zero out-degree) vertices cudaStreamSynchronize(stream); } template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::configure(DistType* _distances, IndexType* _predecessors, int* _edge_mask) { distances = _distances; predecessors = _predecessors; edge_mask = _edge_mask; useEdgeMask = (edge_mask != NULL); computeDistances = (distances != NULL); computePredecessors = (predecessors != NULL); // We need distances for SSSP even if the caller doesn't need them if (!computeDistances) ALLOC_TRY(&distances, n * sizeof(DistType), nullptr); // Need next_distances in either case ALLOC_TRY(&next_distances, n * sizeof(DistType), nullptr); } template <typename IndexType, typename DistType> gdf_error SSSP<IndexType, DistType>::traverse(IndexType source_vertex) { // Init distances to infinities traversal::fill_vec(distances, n, traversal::vec_t<DistType>::max, stream); traversal::fill_vec( next_distances, n, traversal::vec_t<DistType>::max, stream); // If needed, set all predecessors to non-existent (-1) if (computePredecessors) { cudaMemsetAsync(predecessors, -1, n * sizeof(IndexType), stream); } // // Initial frontier // cudaMemsetAsync(&distances[source_vertex], 0, sizeof(DistType), stream); cudaMemsetAsync(&next_distances[source_vertex], 0, sizeof(DistType), stream); int current_isolated_bmap_source_vert = 0; cudaMemcpyAsync(&current_isolated_bmap_source_vert, &isolated_bmap[source_vertex / INT_SIZE], sizeof(int), cudaMemcpyDeviceToHost); // We need current_isolated_bmap_source_vert cudaStreamSynchronize(stream); int m = (1 << (source_vertex % INT_SIZE)); // If source is isolated (zero outdegree), we are done if ((m & current_isolated_bmap_source_vert)) { // Init distances and predecessors are done; stream is synchronized return GDF_SUCCESS; } // Adding source_vertex to init frontier cudaMemcpyAsync(&frontier[0], &source_vertex, sizeof(IndexType), cudaMemcpyHostToDevice, stream); // Number of vertices in the frontier and number of out-edges from the // frontier IndexType mf, nf; nf = 1; int iters = 0; while (nf > 0) { // Typical pre-top down workflow. 
set_frontier_degree + exclusive-scan traversal::set_frontier_degree( frontier_vertex_degree, frontier, vertex_degree, nf, stream); traversal::exclusive_sum(d_cub_exclusive_sum_storage, cub_exclusive_sum_storage_bytes, frontier_vertex_degree, exclusive_sum_frontier_vertex_degree, nf + 1, stream); cudaMemcpyAsync(&mf, &exclusive_sum_frontier_vertex_degree[nf], sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // We need mf to know the next kernel's launch dims cudaStreamSynchronize(stream); traversal::compute_bucket_offsets( exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, nf, mf, stream); // Reset the transient structures to 0 cudaMemsetAsync(iter_buffer, 0, iter_buffer_size, stream); sssp_kernels::frontier_expand( row_offsets, col_indices, edge_weights, frontier, nf, mf, new_frontier, d_new_frontier_cnt, exclusive_sum_frontier_vertex_degree, exclusive_sum_frontier_vertex_buckets_offsets, distances, next_distances, predecessors, edge_mask, next_frontier_bmap, relaxed_edges_bmap, isolated_bmap, stream); cudaMemcpyAsync(&nf, d_new_frontier_cnt, sizeof(IndexType), cudaMemcpyDeviceToHost, stream); // Copy next_distances to distances cudaMemcpyAsync(distances, next_distances, n * sizeof(DistType), cudaMemcpyDeviceToDevice, stream); cudaCheckError(); // We need nf for the loop cudaStreamSynchronize(stream); // Swap frontiers IndexType* tmp = frontier; frontier = new_frontier; new_frontier = tmp; iters++; if (iters > n) { // Bail out. Got a graph with a negative cycle std::cerr << "ERROR: Max iterations exceeded. Check the graph for " "negative weight cycles\n"; return GDF_INVALID_API_CALL; } } return GDF_SUCCESS; } template <typename IndexType, typename DistType> void SSSP<IndexType, DistType>::clean() { // the vectors have a destructor that takes care of cleaning ALLOC_FREE_TRY(frontier, nullptr); ALLOC_FREE_TRY(new_frontier, nullptr); ALLOC_FREE_TRY(isolated_bmap, nullptr); ALLOC_FREE_TRY(vertex_degree, nullptr); ALLOC_FREE_TRY(d_cub_exclusive_sum_storage, nullptr); ALLOC_FREE_TRY(frontier_vertex_degree, nullptr); ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_degree, nullptr); ALLOC_FREE_TRY(exclusive_sum_frontier_vertex_buckets_offsets, nullptr); ALLOC_FREE_TRY(iter_buffer, nullptr); // Distances were working data if (!computeDistances) ALLOC_FREE_TRY(distances, nullptr); // next_distances were working data ALLOC_FREE_TRY(next_distances, nullptr); } } // end namespace cugraph /** * ---------------------------------------------------------------------------* * @brief Native sssp with predecessors * * @file sssp.cu * --------------------------------------------------------------------------*/ gdf_error gdf_sssp(gdf_graph* gdf_G, gdf_column* sssp_distances, gdf_column* predecessors, const int source_vert) { GDF_REQUIRE(gdf_G, GDF_INVALID_API_CALL); GDF_REQUIRE(gdf_G->adjList || gdf_G->edgeList, GDF_INVALID_API_CALL); GDF_REQUIRE(source_vert >= 0, GDF_INVALID_API_CALL); void *sssp_dist_ptr, *pred_ptr; // NOTE: gdf_column struct doesn't have a default constructor. So we can get // garbage values for member fields. Right now, it's the caller's // responsibility to ensure that the fields are initialised if the gdf_column // ptr is not null sssp_dist_ptr = (sssp_distances && sssp_distances->size) ? sssp_distances->data : nullptr; pred_ptr = (predecessors && predecessors->size) ? 
predecessors->data : nullptr; GDF_REQUIRE(sssp_dist_ptr || pred_ptr, GDF_INVALID_API_CALL); if (sssp_dist_ptr) { GDF_REQUIRE(!sssp_distances->valid, GDF_VALIDITY_UNSUPPORTED); // Integral types are possible, but we don't want to deal with overflow // conditions right now GDF_REQUIRE(sssp_distances->dtype == GDF_FLOAT32 || sssp_distances->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); } gdf_error err = gdf_add_adj_list(gdf_G); if (err != GDF_SUCCESS) return err; GDF_REQUIRE(gdf_G->adjList->offsets->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(gdf_G->adjList->indices->dtype == GDF_INT32, GDF_UNSUPPORTED_DTYPE); GDF_REQUIRE(source_vert < gdf_G->adjList->offsets->size - 1, GDF_INVALID_API_CALL); if (pred_ptr) GDF_REQUIRE(predecessors->dtype == gdf_G->adjList->indices->dtype, GDF_UNSUPPORTED_DTYPE); if (sssp_dist_ptr) GDF_REQUIRE(gdf_G->adjList->offsets->size - 1 <= sssp_distances->size, GDF_INVALID_API_CALL); if (!gdf_G->adjList->edge_data) { // Generate unit weights // TODO: This should fallback to BFS, but for now it'll go through the // SSSP path since BFS needs the directed flag, which should not be // necessary for the SSSP API. We can pass directed to the BFS call, but // BFS also does only integer distances right now whereas we need float or // double void* d_edge_data; gdf_G->adjList->edge_data = new gdf_column; cudaStream_t stream{nullptr}; // If distances array is given and is double, generate the weights in // double if (sssp_dist_ptr && sssp_distances->dtype == GDF_FLOAT64) { std::vector<double> h_edge_data(gdf_G->adjList->indices->size, 1.0); size_t edge_data_size = sizeof(double) * h_edge_data.size(); ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream); CUDA_TRY(cudaMemcpy(d_edge_data, &h_edge_data[0], edge_data_size, cudaMemcpyHostToDevice)); gdf_column_view(gdf_G->adjList->edge_data, d_edge_data, nullptr, gdf_G->adjList->indices->size, GDF_FLOAT64); } else { // Else generate float std::vector<float> h_edge_data(gdf_G->adjList->indices->size, 1.0); size_t edge_data_size = sizeof(float) * h_edge_data.size(); ALLOC_TRY((void**)&d_edge_data, edge_data_size, stream); CUDA_TRY(cudaMemcpy(d_edge_data, &h_edge_data[0], edge_data_size, cudaMemcpyHostToDevice)); gdf_column_view(gdf_G->adjList->edge_data, d_edge_data, nullptr, gdf_G->adjList->indices->size, GDF_FLOAT32); } } else { // Got weighted graph GDF_REQUIRE( gdf_G->adjList->edge_data->size == gdf_G->adjList->indices->size, GDF_INVALID_API_CALL); GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 || gdf_G->adjList->edge_data->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); if (sssp_dist_ptr) GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == sssp_distances->dtype, GDF_UNSUPPORTED_DTYPE); // SSSP is not defined for graphs with negative weight cycles // Warn user about any negative edges if (gdf_G->prop && gdf_G->prop->has_negative_edges == GDF_PROP_TRUE) std::cerr << "WARN: The graph has negative weight edges. 
SSSP will not " "converge if the graph has negative weight cycles\n"; } int n = gdf_G->adjList->offsets->size - 1; int e = gdf_G->adjList->indices->size; int* offsets_ptr = (int*)gdf_G->adjList->offsets->data; int* indices_ptr = (int*)gdf_G->adjList->indices->data; void* edge_weights_ptr = static_cast<void*>(gdf_G->adjList->edge_data->data); gdf_error ret; if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT32) { cugraph::SSSP<int, float> sssp( n, e, offsets_ptr, indices_ptr, static_cast<float*>(edge_weights_ptr)); sssp.configure(static_cast<float*>(sssp_dist_ptr), static_cast<int*>(pred_ptr), nullptr); ret = sssp.traverse(source_vert); } else if (gdf_G->adjList->edge_data->dtype == GDF_FLOAT64) { cugraph::SSSP<int, double> sssp(n, e, offsets_ptr, indices_ptr, static_cast<double*>(edge_weights_ptr)); sssp.configure(static_cast<double*>(sssp_dist_ptr), static_cast<int*>(pred_ptr), nullptr); ret = sssp.traverse(source_vert); } else { GDF_REQUIRE(gdf_G->adjList->edge_data->dtype == GDF_FLOAT32 || gdf_G->adjList->edge_data->dtype == GDF_FLOAT64, GDF_INVALID_API_CALL); } return ret; }
05955d87791c59b81ca9b4a9befaee67e28dc24a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <math.h> __global__ void matmul(float * a, float * b, float * c, int * a_shape, int * b_shape) { if ((blockDim.y * blockIdx.y + threadIdx.y) < a_shape[0] && (blockDim.x * blockIdx.x + threadIdx.x) < b_shape[1]) { int aMin = (blockDim.y * blockIdx.y + threadIdx.y) * a_shape[1]; int aMax = (blockDim.y * blockIdx.y + threadIdx.y + 1) * a_shape[1]; int aStep = 1; int bMin = blockDim.x * blockIdx.x + threadIdx.x; int bMax = blockDim.x * blockIdx.x + threadIdx.x + b_shape[0] * b_shape[1]; int bStep = b_shape[1]; float temp = 0; for (int ai = aMin, bi = bMin; ai < aMax && bi < bMax; ai += aStep, bi += bStep) { temp += a[ai] * b[bi]; } int a_index = (blockDim.y * blockIdx.y + threadIdx.y) * b_shape[1]; c[a_index + bMin] = temp; } } __global__ void transpose(float * a, float * a_T, int * a_shape) { int elem_idx = (blockDim.y * blockIdx.y + threadIdx.y) * a_shape[1] + blockDim.x * blockIdx.x + threadIdx.x; if (elem_idx < a_shape[0] * a_shape[1]) { int a_t_1 = a_shape[0]; int elem_tr_idx = (blockDim.x * blockIdx.x + threadIdx.x) * a_t_1 + blockDim.y * blockIdx.y + threadIdx.y; a_T[elem_tr_idx] = a[elem_idx]; } } __global__ void row_mean(float * a, float * mean, int * a_shape) { //Returns a column int row_num = (blockDim.x * blockIdx.x + threadIdx.x); if (row_num < a_shape[0]) { int start_idx = row_num * a_shape[1]; int end_idx = start_idx + a_shape[1]; float sum = 0; for (int i = start_idx; i < end_idx; i++) { sum += a[i]; } mean[row_num] = sum / a_shape[1]; } } __global__ void column_mean(float * a, float * mean, int * a_shape) { //Returns a row int col_num = (blockDim.x * blockIdx.x + threadIdx.x); if (col_num < a_shape[1]) { int start_idx = col_num; int end_idx = start_idx + a_shape[1] * a_shape[0]; float sum = 0; for (int i = start_idx; i < end_idx; i += a_shape[1]) { sum += a[i]; } mean[col_num] = sum / a_shape[0]; } } __global__ void min_row(float * a, int * a_shape, float * min_row, int * arg_min) { //Returns a column for min_row and argmin int row_num = (blockDim.x * blockIdx.x + threadIdx.x); if (row_num < a_shape[0]) { int start_idx = row_num * a_shape[1]; int end_idx = start_idx + a_shape[1]; min_row[row_num] = a[start_idx]; arg_min[row_num] = 0; for (int col = start_idx + 1, index = 1; col < end_idx, index < a_shape[1]; col++, index++) { if (a[col] < min_row[row_num]) { min_row[row_num] = a[col]; arg_min[row_num] = index; } } } } __global__ void sum_axis3(float * a, int * a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[0] && col_num < a_shape[1]) { int start_idx = (row_num * a_shape[1] + col_num) * a_shape[2]; int end_idx = start_idx + a_shape[2]; int step = 1; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } result[row_num * a_shape[1] + col_num] = temp; } } __global__ void sum_axis2(float * a, int * a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[0] && col_num < a_shape[2]) { int start_idx = row_num * a_shape[1] * a_shape[2] + col_num; int end_idx = start_idx + a_shape[2] * a_shape[1]; int step = a_shape[2]; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } 
result[row_num * a_shape[2] + col_num] = temp; } } __global__ void sum_axis1(float * a, int * a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[1] && col_num < a_shape[2]) { int start_idx = (row_num) * a_shape[2] + col_num; int end_idx = start_idx + a_shape[2] * a_shape[1] * a_shape[0]; int step = a_shape[2] * a_shape[1]; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } result[row_num * a_shape[2] + col_num] = temp; } } __global__ void argmin_mu_diff(float * data, float * mu, int * data_shape, int * mu_shape, int * arg_min) { int data_id = blockDim.x * blockIdx.x + threadIdx.x; if (data_id < data_shape[0]) { int startIdx = (blockDim.x * blockIdx.x + threadIdx.x) * data_shape[1]; float min_diff = INT_MAX; float arg_min_diff = -1; for (int i = 0; i < mu_shape[0]; i++) { float diff = 0; for (int dim = 0; dim < mu_shape[1]; dim++) { diff += (data[startIdx + dim] - mu[i * mu_shape[1] + dim]) * (data[startIdx + dim] - mu[i * mu_shape[1] + dim]); } if (diff < min_diff) { min_diff = diff; arg_min_diff = i; } } arg_min[data_id] = arg_min_diff; } }
05955d87791c59b81ca9b4a9befaee67e28dc24a.cu
#include <stdio.h> #include <math.h> __global__ void matmul(float * a, float * b, float * c, int * a_shape, int * b_shape) { if ((blockDim.y * blockIdx.y + threadIdx.y) < a_shape[0] && (blockDim.x * blockIdx.x + threadIdx.x) < b_shape[1]) { int aMin = (blockDim.y * blockIdx.y + threadIdx.y) * a_shape[1]; int aMax = (blockDim.y * blockIdx.y + threadIdx.y + 1) * a_shape[1]; int aStep = 1; int bMin = blockDim.x * blockIdx.x + threadIdx.x; int bMax = blockDim.x * blockIdx.x + threadIdx.x + b_shape[0] * b_shape[1]; int bStep = b_shape[1]; float temp = 0; for (int ai = aMin, bi = bMin; ai < aMax && bi < bMax; ai += aStep, bi += bStep) { temp += a[ai] * b[bi]; } int a_index = (blockDim.y * blockIdx.y + threadIdx.y) * b_shape[1]; c[a_index + bMin] = temp; } } __global__ void transpose(float * a, float * a_T, int * a_shape) { int elem_idx = (blockDim.y * blockIdx.y + threadIdx.y) * a_shape[1] + blockDim.x * blockIdx.x + threadIdx.x; if (elem_idx < a_shape[0] * a_shape[1]) { int a_t_1 = a_shape[0]; int elem_tr_idx = (blockDim.x * blockIdx.x + threadIdx.x) * a_t_1 + blockDim.y * blockIdx.y + threadIdx.y; a_T[elem_tr_idx] = a[elem_idx]; } } __global__ void row_mean(float * a, float * mean, int * a_shape) { //Returns a column int row_num = (blockDim.x * blockIdx.x + threadIdx.x); if (row_num < a_shape[0]) { int start_idx = row_num * a_shape[1]; int end_idx = start_idx + a_shape[1]; float sum = 0; for (int i = start_idx; i < end_idx; i++) { sum += a[i]; } mean[row_num] = sum / a_shape[1]; } } __global__ void column_mean(float * a, float * mean, int * a_shape) { //Returns a row int col_num = (blockDim.x * blockIdx.x + threadIdx.x); if (col_num < a_shape[1]) { int start_idx = col_num; int end_idx = start_idx + a_shape[1] * a_shape[0]; float sum = 0; for (int i = start_idx; i < end_idx; i += a_shape[1]) { sum += a[i]; } mean[col_num] = sum / a_shape[0]; } } __global__ void min_row(float * a, int * a_shape, float * min_row, int * arg_min) { //Returns a column for min_row and argmin int row_num = (blockDim.x * blockIdx.x + threadIdx.x); if (row_num < a_shape[0]) { int start_idx = row_num * a_shape[1]; int end_idx = start_idx + a_shape[1]; min_row[row_num] = a[start_idx]; arg_min[row_num] = 0; for (int col = start_idx + 1, index = 1; col < end_idx, index < a_shape[1]; col++, index++) { if (a[col] < min_row[row_num]) { min_row[row_num] = a[col]; arg_min[row_num] = index; } } } } __global__ void sum_axis3(float * a, int * a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[0] && col_num < a_shape[1]) { int start_idx = (row_num * a_shape[1] + col_num) * a_shape[2]; int end_idx = start_idx + a_shape[2]; int step = 1; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } result[row_num * a_shape[1] + col_num] = temp; } } __global__ void sum_axis2(float * a, int * a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[0] && col_num < a_shape[2]) { int start_idx = row_num * a_shape[1] * a_shape[2] + col_num; int end_idx = start_idx + a_shape[2] * a_shape[1]; int step = a_shape[2]; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } result[row_num * a_shape[2] + col_num] = temp; } } __global__ void sum_axis1(float * a, int * 
a_shape, float * result) { //a[i][j][k] = k+a_shape[2]*j + a_shape[2]*a_shape[1]*i int col_num = (blockDim.x * blockIdx.x + threadIdx.x); int row_num = (blockDim.y * blockIdx.y + threadIdx.y); if (row_num < a_shape[1] && col_num < a_shape[2]) { int start_idx = (row_num) * a_shape[2] + col_num; int end_idx = start_idx + a_shape[2] * a_shape[1] * a_shape[0]; int step = a_shape[2] * a_shape[1]; float temp = 0; for (int idx = start_idx; idx < end_idx; idx += step) { temp += a[idx]; } result[row_num * a_shape[2] + col_num] = temp; } } __global__ void argmin_mu_diff(float * data, float * mu, int * data_shape, int * mu_shape, int * arg_min) { int data_id = blockDim.x * blockIdx.x + threadIdx.x; if (data_id < data_shape[0]) { int startIdx = (blockDim.x * blockIdx.x + threadIdx.x) * data_shape[1]; float min_diff = INT_MAX; float arg_min_diff = -1; for (int i = 0; i < mu_shape[0]; i++) { float diff = 0; for (int dim = 0; dim < mu_shape[1]; dim++) { diff += (data[startIdx + dim] - mu[i * mu_shape[1] + dim]) * (data[startIdx + dim] - mu[i * mu_shape[1] + dim]); } if (diff < min_diff) { min_diff = diff; arg_min_diff = i; } } arg_min[data_id] = arg_min_diff; } }
4e1cc99a54feb58af045687e49a99c9545612605.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_NUM (1024 * 32)
#define THREAD_NUM 32
#define N (BLOCK_NUM * THREAD_NUM)

static void cuda_checker(hipError_t err, const char *file, int line )
{
  if (err != hipSuccess) {
    printf("%s in %s at line %d\n", hipGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
  }
}

#define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__ ))

//
// kernel code
//
__global__ void add(int *a, int *b, int *c) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  // handle the data at this index
  if (tid < N) {
    if(tid < N / 2) {
      c[tid] = a[tid] + b[tid];
    } else {
      c[tid] = a[tid] - b[tid];
    }
  }
}

//
// host code
//
int main(int argc, const char **argv)
{
  int *a, *b, *c;
  a = (int*) malloc(sizeof(int) * N);
  b = (int*) malloc(sizeof(int) * N);
  c = (int*) malloc(sizeof(int) * N);

  int *dev_a, *dev_b, *dev_c;

  for(int i = 0; i < N; i++) {
    a[i] = -i;
    b[i] = i * i;
  }

  CUDA_CHECK( hipMalloc((void**)&dev_a, N * sizeof(int)) );
  CUDA_CHECK( hipMalloc((void**)&dev_b, N * sizeof(int)) );
  CUDA_CHECK( hipMalloc((void**)&dev_c, N * sizeof(int)) );

  CUDA_CHECK( hipMemcpy(dev_a, a, N * sizeof(int), hipMemcpyHostToDevice) );
  CUDA_CHECK( hipMemcpy(dev_b, b, N * sizeof(int), hipMemcpyHostToDevice) );

  float time;
  hipEvent_t start, stop;
  CUDA_CHECK(hipEventCreate(&start));
  CUDA_CHECK(hipEventCreate(&stop));
  CUDA_CHECK(hipEventRecord(start, 0));

  hipLaunchKernelGGL(( add), dim3(BLOCK_NUM), dim3(THREAD_NUM), 0, 0, dev_a, dev_b, dev_c);

  CUDA_CHECK( hipMemcpy(c, dev_c, N * sizeof(int), hipMemcpyDeviceToHost) );

  CUDA_CHECK(hipEventRecord(stop, 0));
  CUDA_CHECK(hipEventSynchronize(stop));
  CUDA_CHECK(hipEventElapsedTime(&time, start, stop));
  printf("Time to generate: %3.1f ms \n", time);

  // for( int i = 0; i < N; i++ ){
  //   int cpu_value = 0;
  //   if (i % 2 == 0) {
  //     a[i]+b[i];
  //   } else {
  //     a[i]-b[i];
  //   }
  //   printf( "cpu: %d, gpu: %d\n", cpu_value, c[i]);
  // }

  CUDA_CHECK( hipFree(dev_a) );
  CUDA_CHECK( hipFree(dev_b) );
  CUDA_CHECK( hipFree(dev_c) );

  hipDeviceReset();
  return 0;
}
4e1cc99a54feb58af045687e49a99c9545612605.cu
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define BLOCK_NUM (1024 * 32)
#define THREAD_NUM 32
#define N (BLOCK_NUM * THREAD_NUM)

static void cuda_checker(cudaError_t err, const char *file, int line )
{
  if (err != cudaSuccess) {
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
  }
}

#define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__ ))

//
// kernel code
//
__global__ void add(int *a, int *b, int *c) {
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  // handle the data at this index
  if (tid < N) {
    if(tid < N / 2) {
      c[tid] = a[tid] + b[tid];
    } else {
      c[tid] = a[tid] - b[tid];
    }
  }
}

//
// host code
//
int main(int argc, const char **argv)
{
  int *a, *b, *c;
  a = (int*) malloc(sizeof(int) * N);
  b = (int*) malloc(sizeof(int) * N);
  c = (int*) malloc(sizeof(int) * N);

  int *dev_a, *dev_b, *dev_c;

  for(int i = 0; i < N; i++) {
    a[i] = -i;
    b[i] = i * i;
  }

  CUDA_CHECK( cudaMalloc((void**)&dev_a, N * sizeof(int)) );
  CUDA_CHECK( cudaMalloc((void**)&dev_b, N * sizeof(int)) );
  CUDA_CHECK( cudaMalloc((void**)&dev_c, N * sizeof(int)) );

  CUDA_CHECK( cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice) );
  CUDA_CHECK( cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice) );

  float time;
  cudaEvent_t start, stop;
  CUDA_CHECK(cudaEventCreate(&start));
  CUDA_CHECK(cudaEventCreate(&stop));
  CUDA_CHECK(cudaEventRecord(start, 0));

  add<<<BLOCK_NUM, THREAD_NUM>>>(dev_a, dev_b, dev_c);

  CUDA_CHECK( cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost) );

  CUDA_CHECK(cudaEventRecord(stop, 0));
  CUDA_CHECK(cudaEventSynchronize(stop));
  CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
  printf("Time to generate: %3.1f ms \n", time);

  // for( int i = 0; i < N; i++ ){
  //   int cpu_value = 0;
  //   if (i % 2 == 0) {
  //     a[i]+b[i];
  //   } else {
  //     a[i]-b[i];
  //   }
  //   printf( "cpu: %d, gpu: %d\n", cpu_value, c[i]);
  // }

  CUDA_CHECK( cudaFree(dev_a) );
  CUDA_CHECK( cudaFree(dev_b) );
  CUDA_CHECK( cudaFree(dev_c) );

  cudaDeviceReset();
  return 0;
}
480aa89bed3d873a6643180156d16a7e5b364ccf.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
        float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 4, false, cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
480aa89bed3d873a6643180156d16a7e5b364ccf.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombination<
        float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, float, LayoutDst, float,
        LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
                cutlass::convolution::ConvType::kConvolution>,
        2, 4, 4, false, cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
9878907d7910024fef04d635d7fbeccecaa7074c.hip
// !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <THH/THHTensor.hpp>
#include <THHUNN/common.h>
#include <ATen/native/hip/im2col.cuh>
#include <TH/THHalf.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHTensor.hpp>
#include <THH/THHStorage.hpp>

#include <THHUNN/generic/SpatialConvolutionLocal.hip>
#include <THH/THHGenerateFloatTypes.h>
9878907d7910024fef04d635d7fbeccecaa7074c.cu
#include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/common.h>
#include <ATen/native/cuda/im2col.cuh>
#include <TH/THHalf.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCTensor.hpp>
#include <THC/THCStorage.hpp>

#include <THCUNN/generic/SpatialConvolutionLocal.cu>
#include <THC/THCGenerateFloatTypes.h>
9c36591fe61c7cfb44ddb9988499289cc78842b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "difference_impl.h" #include <dlib/dnn/cuda_utils.h> __global__ void apply_differencing_impl( const float* input_tensor, float* output_tensor, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { for (auto i : dlib::cuda::grid_stride_range(0, n)) { // Find neighborhood indices long nbhd_c = i/nbhd_nc % in_nc; // also center column long nbhd_r = i/nbhd_nc/in_nc/nbhd_nr % in_nr; // also center row long k = i/nbhd_nc/in_nc/nbhd_nr/in_nr % in_nk; long sample = i/nbhd_nc/in_nc/nbhd_nr/in_nr/in_nk; // Find in-neighborhood indices long in_nbhd_c = i % nbhd_nc; long in_nbhd_r = i/nbhd_nc/in_nc % nbhd_nr; // Find the second input tensor indices long in_c = nbhd_c - nbhd_nc/2 + in_nbhd_c; long in_r = nbhd_r - nbhd_nr/2 + in_nbhd_r; long flag = (sample % 2 == 0) ? 1 : -1; if (in_c < 0 || in_r < 0 || in_nc <= in_c || in_nr <= in_r) { output_tensor[i] = 0.0; } else { long idx1 = ((sample*in_nk + k)*in_nr + nbhd_r)*in_nc + nbhd_c; long idx2 = (((sample+flag)*in_nk + k)*in_nr + in_r)*in_nc + in_c; output_tensor[i] = input_tensor[idx1]-input_tensor[idx2]; } } } __global__ void get_differencing_gradient_impl( const float* gradient_input, float* gradient_output, long out_nk, long out_nr, long out_nc, long nbhd_nr, long nbhd_nc, long n ) { for (auto i : dlib::cuda::grid_stride_range(0, n)) { // Find the output indices long out_c = i % out_nc; long out_r = i/out_nc % out_nr; long k = i/out_nc/out_nr % out_nk; long sample = i/out_nc/out_nr/out_nk; gradient_output[i] = 0; for (long r = out_r*nbhd_nr; r < (out_r+1)*nbhd_nr; ++r) { long offset = ((sample*out_nk + k)*out_nr*nbhd_nr + r)*out_nc*nbhd_nc; for (long c = out_c*nbhd_nc; c < (out_c+1)*nbhd_nc; ++c) { gradient_output[i] += gradient_input[offset + c]; } } long flag = (sample % 2 == 0) ? 1 : -1; long r_off = nbhd_nr/2; long c_off = nbhd_nc/2; long out_nbhd_r = 0; // in-neighborhood row index for (long r = out_r+r_off; r >= out_r-r_off; --r) { if (r < 0 || r >= out_nr) { ++out_nbhd_r; continue; } long out_nbhd_c = 0; // in-neighborhood column index long offset = (((sample+flag)*out_nk + k)*out_nr*nbhd_nr + r*nbhd_nr + out_nbhd_r)*out_nc*nbhd_nc; ++out_nbhd_r; for (long c = out_c+c_off; c >= out_c-c_off; --c) { if (c < 0 || c >= out_nc) { ++out_nbhd_c; continue; } gradient_output[i] -= gradient_input[offset + c*nbhd_nc + out_nbhd_c]; ++out_nbhd_c; } } } } void launch_differencing_kernel( const float* input_tensor, float* data_output, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { dlib::cuda::launch_kernel(apply_differencing_impl, dlib::cuda::max_jobs(n), input_tensor, data_output, in_nk, in_nr, in_nc, nbhd_nr, nbhd_nc, n); } void launch_differencing_gradient_kernel( const float* gradient_input, float* gradient_output, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { dlib::cuda::launch_kernel(get_differencing_gradient_impl, dlib::cuda::max_jobs(n), gradient_input, gradient_output, in_nk, in_nr, in_nc, nbhd_nr, nbhd_nc, n); }
9c36591fe61c7cfb44ddb9988499289cc78842b9.cu
#include "difference_impl.h" #include <dlib/dnn/cuda_utils.h> __global__ void apply_differencing_impl( const float* input_tensor, float* output_tensor, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { for (auto i : dlib::cuda::grid_stride_range(0, n)) { // Find neighborhood indices long nbhd_c = i/nbhd_nc % in_nc; // also center column long nbhd_r = i/nbhd_nc/in_nc/nbhd_nr % in_nr; // also center row long k = i/nbhd_nc/in_nc/nbhd_nr/in_nr % in_nk; long sample = i/nbhd_nc/in_nc/nbhd_nr/in_nr/in_nk; // Find in-neighborhood indices long in_nbhd_c = i % nbhd_nc; long in_nbhd_r = i/nbhd_nc/in_nc % nbhd_nr; // Find the second input tensor indices long in_c = nbhd_c - nbhd_nc/2 + in_nbhd_c; long in_r = nbhd_r - nbhd_nr/2 + in_nbhd_r; long flag = (sample % 2 == 0) ? 1 : -1; if (in_c < 0 || in_r < 0 || in_nc <= in_c || in_nr <= in_r) { output_tensor[i] = 0.0; } else { long idx1 = ((sample*in_nk + k)*in_nr + nbhd_r)*in_nc + nbhd_c; long idx2 = (((sample+flag)*in_nk + k)*in_nr + in_r)*in_nc + in_c; output_tensor[i] = input_tensor[idx1]-input_tensor[idx2]; } } } __global__ void get_differencing_gradient_impl( const float* gradient_input, float* gradient_output, long out_nk, long out_nr, long out_nc, long nbhd_nr, long nbhd_nc, long n ) { for (auto i : dlib::cuda::grid_stride_range(0, n)) { // Find the output indices long out_c = i % out_nc; long out_r = i/out_nc % out_nr; long k = i/out_nc/out_nr % out_nk; long sample = i/out_nc/out_nr/out_nk; gradient_output[i] = 0; for (long r = out_r*nbhd_nr; r < (out_r+1)*nbhd_nr; ++r) { long offset = ((sample*out_nk + k)*out_nr*nbhd_nr + r)*out_nc*nbhd_nc; for (long c = out_c*nbhd_nc; c < (out_c+1)*nbhd_nc; ++c) { gradient_output[i] += gradient_input[offset + c]; } } long flag = (sample % 2 == 0) ? 1 : -1; long r_off = nbhd_nr/2; long c_off = nbhd_nc/2; long out_nbhd_r = 0; // in-neighborhood row index for (long r = out_r+r_off; r >= out_r-r_off; --r) { if (r < 0 || r >= out_nr) { ++out_nbhd_r; continue; } long out_nbhd_c = 0; // in-neighborhood column index long offset = (((sample+flag)*out_nk + k)*out_nr*nbhd_nr + r*nbhd_nr + out_nbhd_r)*out_nc*nbhd_nc; ++out_nbhd_r; for (long c = out_c+c_off; c >= out_c-c_off; --c) { if (c < 0 || c >= out_nc) { ++out_nbhd_c; continue; } gradient_output[i] -= gradient_input[offset + c*nbhd_nc + out_nbhd_c]; ++out_nbhd_c; } } } } void launch_differencing_kernel( const float* input_tensor, float* data_output, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { dlib::cuda::launch_kernel(apply_differencing_impl, dlib::cuda::max_jobs(n), input_tensor, data_output, in_nk, in_nr, in_nc, nbhd_nr, nbhd_nc, n); } void launch_differencing_gradient_kernel( const float* gradient_input, float* gradient_output, long in_nk, long in_nr, long in_nc, long nbhd_nr, long nbhd_nc, long n ) { dlib::cuda::launch_kernel(get_differencing_gradient_impl, dlib::cuda::max_jobs(n), gradient_input, gradient_output, in_nk, in_nr, in_nc, nbhd_nr, nbhd_nc, n); }
b886f54d4ba6ccba4d6545bd22a2281ffc93cd4e.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width_per_chan_relu.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<
        PerChannelBiasVisitor,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
        const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
        const ConvParam& param, float alpha, float beta, hipStream_t stream);
b886f54d4ba6ccba4d6545bd22a2281ffc93cd4e.cu
/**
 * \file dnn/src/cuda/conv_bias/int8/kimpl/conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width_per_chan_relu.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
// generated by gen_cuda_conv_bias_kern_impls.py
#include "../conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width.cuinl"

template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_cdiv4hwn4_ld_64bit_unroll_width<
        PerChannelBiasVisitor,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>>>(
        const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias,
        IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::RELU>> epilogue,
        const ConvParam& param, float alpha, float beta, cudaStream_t stream);
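Aside from the hipify banner, the only change between the two files in this pair is the stream type in the explicit instantiation (hipStream_t versus cudaStream_t). The same mechanical rewrites recur throughout this collection; the tiny self-contained illustration below is not taken from any of the files here, and the kernel in it is hypothetical.

// Hypothetical kernel, used only to show the typical hipify substitutions.
__global__ void scale(float* x, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) { x[i] *= a; }
}

// CUDA host code:
//   cudaStream_t s;  cudaStreamCreate(&s);
//   scale<<<(n + 255) / 256, 256, 0, s>>>(d_x, 2.0f, n);
//
// After hipify:
//   hipStream_t s;  hipStreamCreate(&s);
//   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, s, d_x, 2.0f, n);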
722550b67a8ae2cb486b67e936fdaff26ad43dee.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ void divElements(float * x, float * y, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] /= y[tid]; }
}

extern "C" __global__ void elemMax(float * dst, float * src, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { dst[tid] = max(dst[tid], src[tid]); }
}

extern "C" __global__ void expElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = expf(x[tid]); }
}

extern "C" __global__ void logElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = logf(x[tid]); }
}

extern "C" __global__ void tanhElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = tanhf(x[tid]); }
}

extern "C" __global__ void sinElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = sinf(x[tid]); }
}

extern "C" __global__ void sigmoidElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = (1 + tanhf(x[tid] / 2)) / 2; }
}

extern "C" __global__ void clipPositive(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = fmaxf(0, x[tid]); }
}

extern "C" __global__ void shiftRandUniform(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (x[tid] == 1.0f) { x[tid] = 0; }
  }
}

extern "C" __global__ void uniformToBernoulli(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (x[tid] > 0.5) { x[tid] = 1; } else { x[tid] = 0; }
  }
}

extern "C" __global__ void addRepeated(float * dest, float * source, int destLen, int sourceLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid % sourceLen]; }
}

extern "C" __global__ void addRepeatedPow2(float * dest, float * source, int destLen, int srcMask) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid & srcMask]; }
}

extern "C" __global__ void scaleRepeated(float * dest, float * source, int destLen, int sourceLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] *= source[tid % sourceLen]; }
}

extern "C" __global__ void scaleRepeatedPow2(float * dest, float * source, int destLen, int srcMask) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] *= source[tid & srcMask]; }
}

extern "C" __global__ void addScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += s; }
}

extern "C" __global__ void setScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] = s; }
}

extern "C" __global__ void addChunks(float * dest, float * source, int destLen, int chunkSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid / chunkSize]; }
}

extern "C" __global__ void subChunks(float * dest, float * source, int destLen, int chunkSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] -= source[tid / chunkSize]; }
}

extern "C" __global__ void lessThan(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] < s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __global__ void greaterThan(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] > s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __global__ void equalTo(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] == s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __device__ float addLogPair(float x, float y) {
  float m = max(x, y);
  return logf(expf(x-m) + expf(y-m)) + m;
}

extern "C" __global__ void addLogs(float * dst, float * src, int rowSize) {
  extern __shared__ float chunk[];
  int rowIdx = blockIdx.y * blockDim.x + threadIdx.x;
  if (rowIdx < rowSize) {
    chunk[threadIdx.x] = src[rowIdx+rowSize*blockIdx.x];
  }
  __syncthreads();
  for (int stride = (blockDim.x>>1); stride >= 1; stride >>= 1) {
    if (threadIdx.x < stride && rowIdx+stride < rowSize) {
      chunk[threadIdx.x] = addLogPair(chunk[threadIdx.x], chunk[threadIdx.x+stride]);
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    dst[blockIdx.y + blockIdx.x*gridDim.y] = chunk[0];
  }
}

extern "C" __global__ void powScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] = powf(dest[tid], s); }
}

extern "C" __global__ void mapForward(float * dst, float * src, int * table, int tableSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < tableSize) { dst[tid] = src[table[tid]]; }
}

extern "C" __global__ void mapBackward(float * dst, float * src, int * table, int tableSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < tableSize) { atomicAdd(&dst[table[tid]], src[tid]); }
}

extern "C" __global__ void mapMax(int * table, float * data, int rows, int cols) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < rows) {
    int base = tid * cols;
    float * row = &data[base];
    int maxIdx = 0;
    float maxVal = row[0];
    for (int i = 1; i < cols; ++i) {
      if (row[i] > maxVal) {
        maxVal = row[i];
        maxIdx = i;
      }
    }
    table[tid] = maxIdx + base;
  }
}
722550b67a8ae2cb486b67e936fdaff26ad43dee.cu
extern "C" __global__ void divElements(float * x, float * y, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] /= y[tid]; }
}

extern "C" __global__ void elemMax(float * dst, float * src, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { dst[tid] = max(dst[tid], src[tid]); }
}

extern "C" __global__ void expElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = expf(x[tid]); }
}

extern "C" __global__ void logElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = logf(x[tid]); }
}

extern "C" __global__ void tanhElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = tanhf(x[tid]); }
}

extern "C" __global__ void sinElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = sinf(x[tid]); }
}

extern "C" __global__ void sigmoidElements(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = (1 + tanhf(x[tid] / 2)) / 2; }
}

extern "C" __global__ void clipPositive(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) { x[tid] = fmaxf(0, x[tid]); }
}

extern "C" __global__ void shiftRandUniform(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (x[tid] == 1.0f) { x[tid] = 0; }
  }
}

extern "C" __global__ void uniformToBernoulli(float * x, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (x[tid] > 0.5) { x[tid] = 1; } else { x[tid] = 0; }
  }
}

extern "C" __global__ void addRepeated(float * dest, float * source, int destLen, int sourceLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid % sourceLen]; }
}

extern "C" __global__ void addRepeatedPow2(float * dest, float * source, int destLen, int srcMask) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid & srcMask]; }
}

extern "C" __global__ void scaleRepeated(float * dest, float * source, int destLen, int sourceLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] *= source[tid % sourceLen]; }
}

extern "C" __global__ void scaleRepeatedPow2(float * dest, float * source, int destLen, int srcMask) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] *= source[tid & srcMask]; }
}

extern "C" __global__ void addScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += s; }
}

extern "C" __global__ void setScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] = s; }
}

extern "C" __global__ void addChunks(float * dest, float * source, int destLen, int chunkSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] += source[tid / chunkSize]; }
}

extern "C" __global__ void subChunks(float * dest, float * source, int destLen, int chunkSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] -= source[tid / chunkSize]; }
}

extern "C" __global__ void lessThan(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] < s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __global__ void greaterThan(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] > s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __global__ void equalTo(float s, float * v, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < n) {
    if (v[tid] == s) { v[tid] = 1; } else { v[tid] = 0; }
  }
}

extern "C" __device__ float addLogPair(float x, float y) {
  float m = max(x, y);
  return logf(expf(x-m) + expf(y-m)) + m;
}

extern "C" __global__ void addLogs(float * dst, float * src, int rowSize) {
  extern __shared__ float chunk[];
  int rowIdx = blockIdx.y * blockDim.x + threadIdx.x;
  if (rowIdx < rowSize) {
    chunk[threadIdx.x] = src[rowIdx+rowSize*blockIdx.x];
  }
  __syncthreads();
  for (int stride = (blockDim.x>>1); stride >= 1; stride >>= 1) {
    if (threadIdx.x < stride && rowIdx+stride < rowSize) {
      chunk[threadIdx.x] = addLogPair(chunk[threadIdx.x], chunk[threadIdx.x+stride]);
    }
    __syncthreads();
  }
  if (threadIdx.x == 0) {
    dst[blockIdx.y + blockIdx.x*gridDim.y] = chunk[0];
  }
}

extern "C" __global__ void powScaler(float s, float * dest, int destLen) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < destLen) { dest[tid] = powf(dest[tid], s); }
}

extern "C" __global__ void mapForward(float * dst, float * src, int * table, int tableSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < tableSize) { dst[tid] = src[table[tid]]; }
}

extern "C" __global__ void mapBackward(float * dst, float * src, int * table, int tableSize) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < tableSize) { atomicAdd(&dst[table[tid]], src[tid]); }
}

extern "C" __global__ void mapMax(int * table, float * data, int rows, int cols) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < rows) {
    int base = tid * cols;
    float * row = &data[base];
    int maxIdx = 0;
    float maxVal = row[0];
    for (int i = 1; i < cols; ++i) {
      if (row[i] > maxVal) {
        maxVal = row[i];
        maxIdx = i;
      }
    }
    table[tid] = maxIdx + base;
  }
}
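A minimal host-side usage sketch for the element-wise kernels in this pair (CUDA form; the wrapper name, buffer names, and launch configuration are illustrative, and error checking is omitted):

#include <cuda_runtime.h>

// Declaration for use from another translation unit; alternatively place this
// function in the same .cu file as the kernels above.
extern "C" __global__ void divElements(float * x, float * y, int n);

void divElementsOnDevice(float * hostX, float * hostY, int n) {
    float *dX, *dY;
    size_t bytes = n * sizeof(float);
    cudaMalloc(&dX, bytes);
    cudaMalloc(&dY, bytes);
    cudaMemcpy(dX, hostX, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dY, hostY, bytes, cudaMemcpyHostToDevice);

    int block = 256;
    int grid = (n + block - 1) / block;        // one thread per element
    divElements<<<grid, block>>>(dX, dY, n);   // x[i] /= y[i] for every i < n

    cudaMemcpy(hostX, dX, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dX);
    cudaFree(dY);
}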
9cd7f1cb43a42dd2ab8298d951482567e46fce60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (THTensor_nDimensionLegacyNoScalars(vec) != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), THTensor_nDimensionLegacyNoScalars(vec)); auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0); auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0); if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } auto r_stride = THTensor_strideLegacyNoScalars(r_, 0); if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef 
THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif THCTensor_(free)(state, cmat); } // In hipblasSgemv, hipblasDgemv (x,0).mv(0) does not // handle beta, whereas hipblasSgemm, hipblasDgemm do for case where (x,0).mm(0,y). if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) { if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (THTensor_nDimensionLegacyNoScalars(vec1) != 1) || (THTensor_nDimensionLegacyNoScalars(vec2) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimensionLegacyNoScalars(vec1), THTensor_nDimensionLegacyNoScalars(vec2)); } auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0); auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0); auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0); auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0); if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); 
#elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2_size, 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1_size, 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 
0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 
1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for (int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == 
THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 2 : 1); } else { transpose_batch2 = transpose_result ? 'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if TORCH_HIP_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. 
auto d_matrices1 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer3), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //TORCH_HIP_VERSION #elif defined(THC_REAL_IS_HALF) #if TORCH_HIP_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else hipDeviceProp_t* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 
1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); if (ra_->stride(2) == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size(1); int lda; THCTensor *ra__; if (ra_->stride(1) == 1) { // column ordered, what BLAS wants lda = ra_->stride(2); ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride(2); } int64_t num_batches = ra__->size(0); if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); // THCudaBlas_Sgetrf,THCudaBlas_Dgetrf will NOT write out the info if the dimensionality is 0; // we could check this explicitly, but this seems safer. THCudaIntTensor_zero(state, rinfo_); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. 
size_t matrices_size = num_batches * sizeof(real*); auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); if (num_batches > 0) { const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride(0), num_batches); } int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { if(THCTensor_nElement(state, rinfo_) != 0) { int min = THCudaIntTensor_minall(state, rinfo_); int max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } else { THCudaIntTensor_free(state, rinfo_); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(nDimensionLegacyAll)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyAll)(state, b) == 3 || THCTensor_(nDimensionLegacyAll)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size(1); int nrhs = THTensor_nDimensionLegacyAll(rb_) > 2 ? 
rb_->size(2) : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride(1) == 1) { // column ordered, what BLAS wants lda = atf->stride(2); atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride(2); } // correct ordering of B if (rb_->stride(1) == 1) { // column ordered if (THTensor_nDimensionLegacyAll(rb_) == 2 || rb_->size(2) == 1) { ldb = n; } else { ldb = rb_->stride(2); } rb__ = rb_; } else { // make column ordered if (THTensor_nDimensionLegacyAll(rb_) > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride(2); } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size(0); size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); auto d_atf = static_cast<const real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), (const real**)d_result, THCTensor_(data)(state, rb__), rb__->stride(0), num_batches); hipLaunchKernelGGL(( createBatchGemmBuffer), dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), d_atf, THCTensor_(data)(state, atf_), atf_->stride(0), num_batches); if (!THCudaIntTensor_isContiguous(state, pivots)) { THError("Error: pivots is not contiguous."); } int *pivots_data = THCudaIntTensor_data(state, pivots); int info; #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #endif if (info < 0) { THError("Illegal arg %d", -info); } THCudaFree(state, d_result); THCudaFree(state, d_atf); if (atf_ != atf) { THCTensor_(free)(state, atf_); } if (rb__ != rb_) { THCTensor_(freeCopyTo)(state, rb__, rb_); } #else THError("btrisolve for CUDA tensors is only supported for floats and doubles"); #endif } #endif
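In the pre-CUDA-8 path of baddbmm above, the batched GEMM wrappers consume device-side arrays of per-batch matrix pointers, which createBatchGemmBuffer and createBatchGemmBuffer3 materialise on the GPU. A stripped-down sketch of that pattern, with illustrative names rather than code from the file itself:

// Fills buf[b] with the address of batch b inside one contiguous allocation.
__global__ void fillBatchPointers(const float ** buf, const float * data,
                                  long stride, long numBatches) {
    long idx = blockIdx.x * (long)blockDim.x + threadIdx.x;
    if (idx < numBatches) { buf[idx] = data + idx * stride; }
}

// Host side (error checking omitted):
//   const float ** dPtrs;
//   cudaMalloc(&dPtrs, numBatches * sizeof(float*));
//   long block = 512, grid = (numBatches + block - 1) / block;
//   fillBatchPointers<<<grid, block>>>(dPtrs, dBatchData, batchStride, numBatches);
//   // dPtrs then plays the role of the d_matrices1 / d_matrices2 / d_result_matrices
//   // arrays handed to the *gemmBatched wrappers in the file above.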
9cd7f1cb43a42dd2ab8298d951482567e46fce60.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorMathBlas.cu" #else #define ERROR_ONLY_FP_TYPES(func) \ THError("%s for CUDA tensors only supports floating-point types. Try converting the tensors with .float()", func); THC_API accreal THCTensor_(dot)(THCState *state, THCTensor *self, THCTensor *src) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); THArgCheck(THCTensor_(nElement)(state, self) == THCTensor_(nElement)(state, src), 2, "sizes do not match"); self = THCTensor_(newContiguous)(state, self); src = THCTensor_(newContiguous)(state, src); #ifdef THC_REAL_IS_FLOAT accreal result = THCudaBlas_Sdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_DOUBLE) accreal result = THCudaBlas_Ddot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1); #elif defined(THC_REAL_IS_HALF) accreal result = ScalarConvert<half, accreal>::to( THCudaBlas_Hdot(state, THCTensor_(nElement)(state, self), THCTensor_(data)(state, self), 1, THCTensor_(data)(state, src), 1)); #endif THCTensor_(free)(state, src); THCTensor_(free)(state, self); return result; #else ERROR_ONLY_FP_TYPES("dot"); return ScalarConvert<int, accreal>::to(0); #endif } THC_API void THCTensor_(addmv)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *mat, THCTensor *vec) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, mat, vec)); if( (mat->dim() != 2) || (THTensor_nDimensionLegacyNoScalars(vec) != 1) ) THError("2D tensor and 1D tensor expected, got %dD, %dD tensors", mat->dim(), THTensor_nDimensionLegacyNoScalars(vec)); auto vec_size = THTensor_sizeLegacyNoScalars(vec, 0); auto vec_stride = THTensor_strideLegacyNoScalars(vec, 0); if( mat->size(1) != THTensor_sizeLegacyNoScalars(vec, 0) ) THError("size mismatch"); if(t->dim() != 1) THError("size mismatch"); if(THTensor_sizeLegacyNoScalars(t, 0) != mat->size(0)) THError("size mismatch"); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if(r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } auto r_stride = THTensor_strideLegacyNoScalars(r_, 0); if(mat->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 'n', mat->size(0), mat->size(1), alpha, THCTensor_(data)(state, mat), mat->stride(1), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else if(mat->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, mat), mat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif } else { THCTensor *cmat = THCTensor_(newContiguous)(state, mat); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgemv(state, 't', mat->size(1), mat->size(0), alpha, 
THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemv(state, 't', mat->size(1), mat->size(0), alpha, THCTensor_(data)(state, cmat), cmat->stride(0), THCTensor_(data)(state, vec), vec_stride, beta, THCTensor_(data)(state, r_), r_stride); #endif THCTensor_(free)(state, cmat); } // In cublasSgemv, cublasDgemv (x,0).mv(0) does not // handle beta, whereas cublasSgemm, cublasDgemm do for case where (x,0).mm(0,y). if (THTensor_sizeLegacyNoScalars(vec, 0) == 0 && mat->size(0) != 0) { if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } } #elif defined(THC_REAL_IS_HALF) // Currently no Hgemv/SgemvEx in Cublas THCTensor *vecAsMatrix = THCTensor_(newWithTensor)(state, vec); THCTensor_(resize2d)(state, vecAsMatrix, vec_size, 1); THCTensor *tAsMatrix = THCTensor_(newWithTensor)(state, t); THCTensor_(resize2d)(state, tAsMatrix, THTensor_sizeLegacyNoScalars(tAsMatrix, 0), 1); THCTensor_(addmm)(state, r_, beta, tAsMatrix, alpha, mat, vecAsMatrix); // r_ will have answer as matrix, need to return a vector THCTensor_(resize1d)(state, r_, THTensor_sizeLegacyNoScalars(r_, 0)); THCTensor_(free)(state, vecAsMatrix); THCTensor_(free)(state, tAsMatrix); #endif #else ERROR_ONLY_FP_TYPES("addmv"); #endif } THC_API void THCTensor_(addr)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *vec1, THCTensor *vec2) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, vec1, vec2)); if ( (THTensor_nDimensionLegacyNoScalars(vec1) != 1) || (THTensor_nDimensionLegacyNoScalars(vec2) != 1) ) { THError("1D tensors expected, got %dD, %dD tensors", THTensor_nDimensionLegacyNoScalars(vec1), THTensor_nDimensionLegacyNoScalars(vec2)); } auto vec1_size = THTensor_sizeLegacyNoScalars(vec1, 0); auto vec2_size = THTensor_sizeLegacyNoScalars(vec2, 0); auto vec1_stride = THTensor_strideLegacyNoScalars(vec1, 0); auto vec2_stride = THTensor_strideLegacyNoScalars(vec2, 0); if (t->dim() != 2) { THError("size mismatch"); } if ( (t->size(0) != vec1_size) || (t->size(1) != vec2_size) ) { THError("size mismatch"); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) if (r_ != t) { THCTensor_(resizeAs)(state, r_, t); THCTensor_(copy)(state, r_, t); } if(THCNumerics<real>::eq(beta, ScalarConvert<int, real>::to(0))) { THCTensor_(zero)(state, r_); } else if(THCNumerics<real>::ne(beta, ScalarConvert<int, real>::to(1))) { THCTensor_(mul)(state, r_, r_, beta); } if(r_->stride(0) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec1_size, vec2_size, alpha, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, r_), r_->stride(1)); #endif } else if(r_->stride(1) == 1) { #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, 
THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, r_), r_->stride(0)); #endif } else { THCTensor *cr = THCTensor_(newClone)(state, r_); #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dger(state, vec2_size, vec1_size, alpha, THCTensor_(data)(state, vec2), vec2_stride, THCTensor_(data)(state, vec1), vec1_stride, THCTensor_(data)(state, cr), cr->stride(0)); #endif THCTensor_(freeCopyTo)(state, cr, r_); } #elif defined(THC_REAL_IS_HALF) // currently no Hger/SgerEx in Cublas. THCTensor *vec2T = THCTensor_(newWithTensor)(state, vec2); THCTensor_(resize2d)(state, vec2T, vec2_size, 1); THCTensor_(transpose)(state, vec2T, NULL, 0, 1); THCTensor *vec1M = THCTensor_(newWithTensor)(state, vec1); THCTensor_(resize2d)(state, vec1M, vec1_size, 1); THCTensor_(addmm)(state, r_, beta, t, alpha, vec1M, vec2T); THCTensor_(free)(state, vec2T); THCTensor_(free)(state, vec1M); #endif #else ERROR_ONLY_FP_TYPES("addr"); #endif } THC_API void THCTensor_(addmm)(THCState *state, THCTensor *r_, real beta, THCTensor *t, real alpha, THCTensor *m1, THCTensor *m2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, r_, t, m1, m2)); char transpose_r, transpose_m1, transpose_m2; THCTensor *r__, *m1_, *m2_; if( (m1->dim() != 2) || (m2->dim() != 2) ) THError("2D tensors expected, got %dD, %dD tensors", m1->dim(), m2->dim()); if(t->dim() != 2) THError("2D tensor expected, got %dD tensor for t", t->dim()); if(m1->size(1) != m2->size(0)) { THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, m1: %s, m2: %s", bm1.str, bm2.str); } if( (t->size(0) != m1->size(0)) || (t->size(1) != m2->size(1)) ) { THCDescBuff bt = THCTensor_(sizeDesc)(state, t); THCDescBuff bm1 = THCTensor_(sizeDesc)(state, m1); THCDescBuff bm2 = THCTensor_(sizeDesc)(state, m2); THError("size mismatch, t: %s, m1: %s, m2: %s", bt.str, bm1.str, bm2.str); } if(t != r_) { THCTensor_(resizeAs)(state, r_, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, r_, t); } } /* r_ */ if(r_->stride(0) == 1 && r_->stride(1) != 0) { transpose_r = 'n'; r__ = r_; } else if(r_->stride(1) == 1 && r_->stride(0) != 0) { THCTensor *swap = m2; m2 = m1; m1 = swap; transpose_r = 't'; r__ = r_; } else { transpose_r = 'n'; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, r_, 0, 1); r__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, r__, NULL, 0, 1); } /* m1 */ if(m1->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m1->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m1 = 'n'; m1_ = m1; } else if(m1->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m1->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m1 = 't'; m1_ = m1; } else { transpose_m1 = (transpose_r == 'n' ? 't' : 'n'); m1_ = THCTensor_(newContiguous)(state, m1); } /* m2 */ if(m2->stride((transpose_r == 'n' ? 0 : 1)) == 1 && m2->stride((transpose_r == 'n' ? 1 : 0)) != 0) { transpose_m2 = 'n'; m2_ = m2; } else if(m2->stride((transpose_r == 'n' ? 1 : 0)) == 1 && m2->stride((transpose_r == 'n' ? 0 : 1)) != 0) { transpose_m2 = 't'; m2_ = m2; } else { transpose_m2 = (transpose_r == 'n' ? 
't' : 'n'); m2_ = THCTensor_(newContiguous)(state, m2); } #ifdef THC_REAL_IS_HALF THCudaBlas_Hgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgemm(state, transpose_m1, transpose_m2, r__->size((transpose_r == 'n' ? 0 : 1)), r__->size((transpose_r == 'n' ? 1 : 0)), m1_->size((transpose_r == 'n' ? 1 : 0)), alpha, THCTensor_(data)(state, m1_), (transpose_m1 == 'n' ? m1_->stride((transpose_r == 'n' ? 1 : 0)) : m1_->stride((transpose_r == 'n' ? 0 : 1))), THCTensor_(data)(state, m2_), (transpose_m2 == 'n' ? m2_->stride((transpose_r == 'n' ? 1 : 0)) : m2_->stride((transpose_r == 'n' ? 0 : 1))), beta, THCTensor_(data)(state, r__), r__->stride((transpose_r == 'n' ? 1 : 0))); #endif /* free intermediate variables */ if(m1_ != m1) { THCTensor_(free)(state, m1_); } if(m2_ != m2) { THCTensor_(free)(state, m2_); } if(r__ != r_) { THCTensor_(freeCopyTo)(state, r__, r_); } #else ERROR_ONLY_FP_TYPES("addmm"); #endif } THC_API void THCTensor_(addbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 2, 4, "expected 2D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); int64_t batchnum = THCTensor_(size)(state, batch1, 0); int64_t m1d1 = THCTensor_(size)(state, batch1, 1); int64_t innerdim = THCTensor_(size)(state, batch1, 2); int64_t m2d2 = THCTensor_(size)(state, batch2, 2); THArgCheck(batchnum == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); // M is t, as listed in the docs under addbmm THArgCheck(m1d1 == THCTensor_(size)(state, t, 0), 6, "first dimension must match first dimension of M"); THArgCheck(m2d2 == THCTensor_(size)(state, t, 1), 7, "second dimension must match second dimension of M"); THArgCheck(innerdim == THCTensor_(size)(state, batch2, 1), 6, "second dimension must match first dimension of batch2"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } THCTensor *slice1 = THCTensor_(new)(state); THCTensor *slice2 = THCTensor_(new)(state); for 
(int64_t i=0; i<batchnum; i++) { THCTensor_(select)(state, slice1, batch1, 0, i); THCTensor_(select)(state, slice2, batch2, 0, i); THCTensor_(addmm)(state, result, beta, result, alpha, slice1, slice2); beta = ScalarConvert<int, real>::to(1); } THCTensor_(free)(state, slice1); THCTensor_(free)(state, slice2); #else ERROR_ONLY_FP_TYPES("addbmm"); #endif } __global__ void createBatchGemmBuffer(const real** buffer, real* data, int64_t stride, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer[idx] = data + idx * stride; } } __global__ void createBatchGemmBuffer3(const real** buffer1, const real ** buffer2, const real ** buffer3, real* data1, real * data2, real * data3, int64_t stride1, int64_t stride2, int64_t stride3, int64_t num_batches) { const int64_t idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_batches) { buffer1[idx] = data1 + idx * stride1; buffer2[idx] = data2 + idx * stride2; buffer3[idx] = data3 + idx * stride3; } } THC_API void THCTensor_(baddbmm)(THCState *state, THCTensor *result, real beta, THCTensor *t, real alpha, THCTensor *batch1, THCTensor *batch2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THCAssertSameGPU(THCTensor_(checkGPU)(state, 4, result, t, batch1, batch2)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, t) == 3, 4, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch1) == 3, 6, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, batch2) == 3, 7, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch1, 0), 6, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 0) == THCTensor_(size)(state, batch2, 0), 7, "equal number of batches expected"); THArgCheck(THCTensor_(size)(state, t, 1) == THCTensor_(size)(state, batch1, 1), 6, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, t, 2) == THCTensor_(size)(state, batch2, 2), 7, "wrong matrix size"); THArgCheck(THCTensor_(size)(state, batch1, 2) == THCTensor_(size)(state, batch2, 1), 6, "wrong matrix size"); if (t != result) { THCTensor_(resizeAs)(state, result, t); if (ScalarConvert<real, double>::to(beta) != 0.0) { THCTensor_(copy)(state, result, t); } } bool transpose_result; char transpose_batch1, transpose_batch2; int64_t lda, ldb, ldc; THCTensor *result_, *batch1_, *batch2_; if (result->stride(1) == 1) { transpose_result = false; result_ = result; ldc = result_->stride(2); } else if (result->stride(2) == 1) { transpose_result = true; THCTensor *swap = batch2; batch2 = batch1; batch1 = swap; result_ = result; ldc = result_->stride(1); } else { transpose_result = false; THCTensor *transp_r_ = THCTensor_(newTranspose)(state, result, 1, 2); result_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, result_, NULL, 1, 2); ldc = result_->stride(2); } if (batch1->stride(transpose_result ? 2 : 1) == 1 && batch1->stride(transpose_result ? 1 : 2) != 0) { transpose_batch1 = 'n'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 1 : 2); } else if (batch1->stride(transpose_result ? 1 : 2) == 1 && batch1->stride(transpose_result ? 2 : 1) != 0) { transpose_batch1 = 't'; batch1_ = batch1; lda = batch1_->stride(transpose_result ? 2 : 1); } else { transpose_batch1 = transpose_result ? 
'n' : 't'; // batch1_ is later freed if batch1_ != batch1 if (THCTensor_(isContiguous)(state, batch1)) { batch1_ = batch1; } else { batch1_ = THCTensor_(newContiguous)(state, batch1); } lda = batch1_->stride(1); } if (batch2->stride(transpose_result ? 2 : 1) == 1 && batch2->stride(transpose_result ? 1 : 2) != 0) { transpose_batch2 = 'n'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 1 : 2); } else if (batch2->stride(transpose_result ? 1 : 2) == 1 && batch2->stride(transpose_result ? 2 : 1) != 0) { transpose_batch2 = 't'; batch2_ = batch2; ldb = batch2_->stride(transpose_result ? 2 : 1); } else { transpose_batch2 = transpose_result ? 'n' : 't'; // batch2_ is later freed if batch2_ != batch2 if (THCTensor_(isContiguous)(state, batch2)) { batch2_ = batch2; } else { batch2_ = THCTensor_(newContiguous)(state, batch2); } ldb = batch2_->stride(1); } int64_t num_batches = result_->size(0); #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) // Compute pointers to matrices in each batch. #if CUDA_VERSION < 8000 size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. auto d_matrices1 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_matrices2 = static_cast<const real**>(THCudaMalloc(state, matrices_size)); auto d_result_matrices = static_cast<real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer3<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_matrices1, d_matrices2, (const real**)d_result_matrices, THCTensor_(data)(state, batch1_), THCTensor_(data)(state, batch2_), THCTensor_(data)(state, result_), batch1_->stride(0), batch2_->stride(0), result_->stride(0), num_batches); #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, d_matrices1, lda, d_matrices2, ldb, beta, d_result_matrices, ldc, num_batches); #endif //THC_REAL THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCudaFree(state, d_result_matrices); #else #ifdef THC_REAL_IS_FLOAT THCudaBlas_SgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_DgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 
1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); #endif //THC_REAL #endif //CUDA_VERSION #elif defined(THC_REAL_IS_HALF) #if CUDA_VERSION < 9010 // Currently no HgemmBatched in Cublas for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } #else cudaDeviceProp* prop = THCState_getCurrentDeviceProperties(state); if (prop->major >= 5){ THCudaBlas_HgemmStridedBatched( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_), lda, batch1_->stride(0), THCTensor_(data)(state, batch2_), ldb, batch2_->stride(0), beta, THCTensor_(data)(state, result_), ldc, result_->stride(0), num_batches); } else { for (int64_t i = 0; i < num_batches; ++i) { THCudaBlas_Hgemm( state, transpose_batch1, transpose_batch2, result_->size(transpose_result ? 2 : 1), result_->size(transpose_result ? 1 : 2), batch1_->size(transpose_result ? 1 : 2), alpha, THCTensor_(data)(state, batch1_) + i * batch1_->stride(0), lda, THCTensor_(data)(state, batch2_) + i * batch2_->stride(0), ldb, beta, THCTensor_(data)(state, result_) + i * result_->stride(0), ldc); } } #endif #endif if (batch1_ != batch1) { THCTensor_(free)(state, batch1_); } if (batch2_ != batch2) { THCTensor_(free)(state, batch2_); } if (result_ != result) { THCTensor_(freeCopyTo)(state, result_, result); } #else ERROR_ONLY_FP_TYPES("baddbmm"); #endif } THC_API void THCTensor_(btrifact)(THCState *state, THCTensor *ra_, THCudaIntTensor *rpivots_, THCudaIntTensor *rinfo_, int pivot, THCTensor *a) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 2, ra_, a)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, a) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(size)(state, a, 1) == THCTensor_(size)(state, a, 2), 3, "matrices must be square"); if (ra_ != a) { THCTensor_(resizeAs)(state, ra_, a); if (ra_->stride(2) == 1) { THCTensor_(transpose)(state, ra_, NULL, 1, 2); } THCTensor_(copy)(state, ra_, a); } int n = a->size(1); int lda; THCTensor *ra__; if (ra_->stride(1) == 1) { // column ordered, what BLAS wants lda = ra_->stride(2); ra__ = ra_; } else { // not column ordered, need to make it such (requires copy) THCTensor *transp_r_ = THCTensor_(newTranspose)(state, ra_, 1, 2); ra__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, ra__, NULL, 1, 2); lda = ra__->stride(2); } int64_t num_batches = ra__->size(0); if (!pivot) { THCudaIntTensor *t = THCudaIntTensor_new(state); THCudaIntTensor_range(state, t, 1, n, 1); THCudaIntTensor_unsqueeze1d(state, t, t, 0); THCudaIntTensor** ptrs = (THCudaIntTensor**) THAlloc(sizeof(THCudaIntTensor*)*num_batches); for (int64_t i=0; i<num_batches; i++) { ptrs[i] = t; } THCudaIntTensor_catArray(state, rpivots_, ptrs, num_batches, 0); THCudaIntTensor_free(state, t); THFree(ptrs); } else { THCudaIntTensor_resize2d(state, rpivots_, num_batches, n); } 
bool free_rinfo_ = !rinfo_; if (rinfo_ == NULL) rinfo_ = THCudaIntTensor_new(state); THCudaIntTensor_resize1d(state, rinfo_, num_batches); // THCudaBlas_Sgetrf,THCudaBlas_Dgetrf will NOT write out the info if the dimensionality is 0; // we could check this explicitly, but this seems safer. THCudaIntTensor_zero(state, rinfo_); int *info_gpu = THCudaIntTensor_data(state, rinfo_); // Copy pointers to device. size_t matrices_size = num_batches * sizeof(real*); auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); if (num_batches > 0) { const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result, THCTensor_(data)(state, ra__), ra__->stride(0), num_batches); } int *pivots_gpu = NULL; if (pivot) { pivots_gpu = THCudaIntTensor_data(state, rpivots_); } #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrf(state, n, d_result, lda, pivots_gpu, info_gpu, num_batches); #endif THCudaFree(state, d_result); if (ra__ != ra_) { THCTensor_(freeCopyTo)(state, ra__, ra_); } if (free_rinfo_) { if(THCTensor_nElement(state, rinfo_) != 0) { int min = THCudaIntTensor_minall(state, rinfo_); int max = THCudaIntTensor_maxall(state, rinfo_); THCudaIntTensor_free(state, rinfo_); if (min != 0 || max != 0) { THError("failed to factorize some batch elements (min info == %d, max info == %d)", min, max); } } else { THCudaIntTensor_free(state, rinfo_); } } #else THError("btrifact for CUDA tensors is only supported for floats and doubles"); #endif } THC_API void THCTensor_(btrisolve)(THCState *state, THCTensor *rb_, THCTensor *b, THCTensor *atf, THCudaIntTensor *pivots) { #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) THAssert(THCTensor_(checkGPU)(state, 3, rb_, atf, b)); THArgCheck(THCTensor_(nDimensionLegacyAll)(state, atf) == 3, 3, "expected 3D tensor"); THArgCheck(THCTensor_(nDimensionLegacyAll)(state, b) == 3 || THCTensor_(nDimensionLegacyAll)(state, b) == 2, 4, "expected 2D or 3D tensor"); THArgCheck(THCTensor_(size)(state, atf, 0) == THCTensor_(size)(state, b, 0), 3, "number of batches must be equal"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, atf, 2), 3, "A matrices must be square"); THArgCheck(THCTensor_(size)(state, atf, 1) == THCTensor_(size)(state, b, 1), 3, "dimensions of A and b must be equal"); if (rb_ != b) { THCTensor_(resizeAs)(state, rb_, b); THCTensor_(copy)(state, rb_, b); } int n = atf->size(1); int nrhs = THTensor_nDimensionLegacyAll(rb_) > 2 ? 
rb_->size(2) : 1; THCTensor *atf_; THCTensor *rb__; int lda, ldb; // correct ordering of A_tf if (atf->stride(1) == 1) { // column ordered, what BLAS wants lda = atf->stride(2); atf_ = atf; } else { // not column ordered, need to make it such (requires copy) // it would be nice if we could use the op(A) flags to automatically // transpose A if needed, but this leads to unpredictable behavior if the // user clones A_tf later with a different ordering THCTensor *transp_r_ = THCTensor_(newTranspose)(state, atf, 1, 2); atf_ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, atf_, NULL, 1, 2); lda = atf_->stride(2); } // correct ordering of B if (rb_->stride(1) == 1) { // column ordered if (THTensor_nDimensionLegacyAll(rb_) == 2 || rb_->size(2) == 1) { ldb = n; } else { ldb = rb_->stride(2); } rb__ = rb_; } else { // make column ordered if (THTensor_nDimensionLegacyAll(rb_) > 2) { THCTensor *transp_r_ = THCTensor_(newTranspose)(state, rb_, 1, 2); rb__ = THCTensor_(newClone)(state, transp_r_); THCTensor_(free)(state, transp_r_); THCTensor_(transpose)(state, rb__, NULL, 1, 2); ldb = rb__->stride(2); } else { rb__ = THCTensor_(newClone)(state, rb_); ldb = n; } } int64_t num_batches = rb_->size(0); size_t matrices_size = num_batches * sizeof(real*); // Copy pointers to device. auto d_result = static_cast<real**>(THCudaMalloc(state, matrices_size)); auto d_atf = static_cast<const real**>(THCudaMalloc(state, matrices_size)); const int64_t block = 512; const int64_t grid = (num_batches + block - 1) / block; createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( (const real**)d_result, THCTensor_(data)(state, rb__), rb__->stride(0), num_batches); createBatchGemmBuffer<<<grid, block, 0, THCState_getCurrentStream(state)>>>( d_atf, THCTensor_(data)(state, atf_), atf_->stride(0), num_batches); if (!THCudaIntTensor_isContiguous(state, pivots)) { THError("Error: pivots is not contiguous."); } int *pivots_data = THCudaIntTensor_data(state, pivots); int info; #ifdef THC_REAL_IS_FLOAT THCudaBlas_Sgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #elif defined(THC_REAL_IS_DOUBLE) THCudaBlas_Dgetrs(state, 'n', n, nrhs, d_atf, lda, pivots_data, d_result, ldb, &info, num_batches); #endif if (info < 0) { THError("Illegal arg %d", -info); } THCudaFree(state, d_result); THCudaFree(state, d_atf); if (atf_ != atf) { THCTensor_(free)(state, atf_); } if (rb__ != rb_) { THCTensor_(freeCopyTo)(state, rb__, rb_); } #else THError("btrisolve for CUDA tensors is only supported for floats and doubles"); #endif } #endif
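// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original file pair above/below.] The
// batched routines that precede this point (baddbmm for CUDA < 8.0, btrifact,
// btrisolve) first build a device-side array of per-batch matrix pointers with
// createBatchGemmBuffer and then hand that array to the cuBLAS *Batched /
// *getrf / *getrs entry points. A minimal, self-contained version of that
// pointer-buffer pattern is sketched below; the names buildPtrArray, d_data
// and d_ptrs are hypothetical and the sizes are made up for illustration.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void buildPtrArray(const float **ptrs, float *base,
                              long long stride, long long num_batches) {
  long long idx = blockIdx.x * (long long)blockDim.x + threadIdx.x;
  if (idx < num_batches) ptrs[idx] = base + idx * stride;  // one matrix per batch
}

int main() {
  const long long batches = 8, rows = 4, cols = 4, stride = rows * cols;
  float *d_data = NULL;          // contiguous storage for all batch matrices
  const float **d_ptrs = NULL;   // per-batch pointers consumed by *Batched BLAS
  cudaMalloc((void **)&d_data, batches * stride * sizeof(float));
  cudaMalloc((void **)&d_ptrs, batches * sizeof(float *));
  const int block = 512;
  const int grid = (int)((batches + block - 1) / block);
  buildPtrArray<<<grid, block>>>(d_ptrs, d_data, stride, batches);
  cudaDeviceSynchronize();
  // d_ptrs[i] now points at matrix i; an array like this is what the
  // batched BLAS/LAPACK calls in the file above receive.
  std::printf("built %lld per-batch pointers\n", batches);
  cudaFree((void *)d_ptrs);
  cudaFree(d_data);
  return 0;
}
// ---------------------------------------------------------------------------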
94428fbb3f1f62c8abffa95fb9766b6d7d0fb412.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_pooling_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { __device__ int get_start_index(int a, int b, int c) { return (int)floorf((float)(a * c) / b); } __device__ int get_end_index(int a, int b, int c) { return (int)ceilf((float)((a + 1) * c) / b); } __global__ void adaptive_pooling_kernel(const float* input, float* output, int channels, int input_height, int input_width, int output_height, int output_width, int pool_type) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= output_height * output_width) return; const float* input_ptr = input + blockIdx.y * input_height * input_width; float* output_ptr = output + blockIdx.y * output_height * output_width; int oh = tid / output_width; int ow = tid % output_width; int ih0 = get_start_index(oh, output_height, input_height); int ih1 = get_end_index(oh, output_height, input_height); int kh = ih1 - ih0; int iw0 = get_start_index(ow, output_width, input_width); int iw1 = get_end_index(ow, output_width, input_width); int kw = iw1 - iw0; if (pool_type == 1) { float sum = 0; for (int ih = ih0; ih < ih1; ih++) { for (int iw = iw0; iw < iw1; iw++) { sum += input_ptr[ih * input_width + iw]; } } output_ptr[oh * output_width + ow] = sum / kh / kw; } } static bool IsGlobalPooling(PoolingLayerParam *param, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return param->kernels[1] == 0 && param->kernels[0] == 0; } CudaPoolingLayerAcc::~CudaPoolingLayerAcc() { cudnnDestroy(this->m_cudnn_handle); cudnnDestroyPoolingDescriptor(this->m_pooling_desc); cudnnDestroyTensorDescriptor(this->m_input_desc); cudnnDestroyTensorDescriptor(this->m_output_desc); } Status CudaPoolingLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs); if (ret != TNN_OK) { return ret; } auto params = dynamic_cast<PoolingLayerParam*>(param); if (params->pool_type == 0) { this->m_pooling_mode = CUDNN_POOLING_MAX; } else { this->m_pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; } this->m_tensor_format = CUDNN_TENSOR_NCHW; this->m_data_type = CUDNN_DATA_FLOAT; cudnnCreate(&m_cudnn_handle); cudnnCreatePoolingDescriptor(&m_pooling_desc); cudnnCreateTensorDescriptor(&m_input_desc); cudnnCreateTensorDescriptor(&m_output_desc); auto input_dims = inputs[0]->GetBlobDesc().dims; auto output_dims = outputs[0]->GetBlobDesc().dims; is_global = IsGlobalPooling(params, inputs, outputs); cudnnSetPooling2dDescriptor(this->m_pooling_desc, this->m_pooling_mode, CUDNN_PROPAGATE_NAN, params->kernels[1], params->kernels[0], params->pads[2], params->pads[0], 
params->strides[1], params->strides[0]); cudnnSetTensor4dDescriptor(this->m_input_desc, this->m_tensor_format, this->m_data_type, input_dims[0], input_dims[1], input_dims[2], input_dims[3]); cudnnSetTensor4dDescriptor(this->m_output_desc, this->m_tensor_format, this->m_data_type, output_dims[0], output_dims[1], output_dims[2], output_dims[3]); return TNN_OK; } Status CudaPoolingLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<PoolingLayerParam *>(param_); auto input_dims = inputs[0]->GetBlobDesc().dims; auto output_dims = outputs[0]->GetBlobDesc().dims; if (is_global) { cudnnSetPooling2dDescriptor(this->m_pooling_desc, this->m_pooling_mode, CUDNN_PROPAGATE_NAN, input_dims[2], input_dims[3], params->pads[2], params->pads[0], params->strides[1], params->strides[0]); cudnnSetTensor4dDescriptor(this->m_input_desc, this->m_tensor_format, this->m_data_type, input_dims[0], input_dims[1], input_dims[2], input_dims[3]); cudnnSetTensor4dDescriptor(this->m_output_desc, this->m_tensor_format, this->m_data_type, output_dims[0], output_dims[1], output_dims[2], output_dims[3]); } return TNN_OK; } Status CudaPoolingLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto param = dynamic_cast<PoolingLayerParam *>(param_); if (!param) { return Status(TNNERR_MODEL_ERR, "Error: PoolingLayerParam is nil"); } Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; float* input_data = static_cast<float*>(input_blob->GetHandle().base); float* output_data = static_cast<float*>(output_blob->GetHandle().base); if (param->is_adaptive_pool) { auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; bool is_1d = input_dims.size() == 3; int channels = is_1d ? input_dims[0] : input_dims[0] * input_dims[1]; int input_height = is_1d ? input_dims[1] : input_dims[2]; int input_width = is_1d ? input_dims[2] : input_dims[3]; int output_height = is_1d ? output_dims[1] : output_dims[2]; int output_width = is_1d ? output_dims[2] : output_dims[3]; int count = output_height*output_width; dim3 grid(TNN_CUDA_GET_BLOCKS(count), channels); hipLaunchKernelGGL(( adaptive_pooling_kernel), dim3(grid), dim3(TNN_CUDA_NUM_THREADS), 0, context_->GetStream(), input_data, output_data, channels, input_height, input_width, output_height, output_width, param->pool_type); } else { float alpha = 1.f; float beta = 0.f; cudnnPoolingForward(this->m_cudnn_handle, this->m_pooling_desc, &alpha, m_input_desc, input_data, &beta, m_output_desc, output_data); } return TNN_OK; } REGISTER_CUDA_ACC(Pooling, LAYER_POOLING); } // namespace TNN_NS
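// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original file pair above/below.] The
// .hip file above and its .cu counterpart below differ essentially only in
// the kernel-launch syntax: hipify rewrites CUDA's triple-chevron launch into
// hipLaunchKernelGGL(kernel, gridDim, blockDim, sharedMemBytes, stream, args...).
// A minimal illustration of that mapping (CUDA form compiled, HIP form shown
// as a comment; the kernel name `scale` is made up for this sketch):
#include <cuda_runtime.h>

__global__ void scale(float *x, int n, float a) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  float *d_x = NULL;
  cudaMalloc((void **)&d_x, n * sizeof(float));
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  // CUDA launch, as in the .cu file below:
  scale<<<(n + 255) / 256, 256, 0, stream>>>(d_x, n, 2.0f);
  // Equivalent HIP launch emitted by hipify, as in the .hip file above:
  //   hipLaunchKernelGGL(scale, dim3((n + 255) / 256), dim3(256), 0, stream,
  //                      d_x, n, 2.0f);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
  cudaFree(d_x);
  return 0;
}
// ---------------------------------------------------------------------------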
94428fbb3f1f62c8abffa95fb9766b6d7d0fb412.cu
// Tencent is pleased to support the open source community by making TNN available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. #include "tnn/device/cuda/acc/cuda_pooling_layer_acc.h" #include "tnn/utils/dims_utils.h" namespace TNN_NS { __device__ int get_start_index(int a, int b, int c) { return (int)floorf((float)(a * c) / b); } __device__ int get_end_index(int a, int b, int c) { return (int)ceilf((float)((a + 1) * c) / b); } __global__ void adaptive_pooling_kernel(const float* input, float* output, int channels, int input_height, int input_width, int output_height, int output_width, int pool_type) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid >= output_height * output_width) return; const float* input_ptr = input + blockIdx.y * input_height * input_width; float* output_ptr = output + blockIdx.y * output_height * output_width; int oh = tid / output_width; int ow = tid % output_width; int ih0 = get_start_index(oh, output_height, input_height); int ih1 = get_end_index(oh, output_height, input_height); int kh = ih1 - ih0; int iw0 = get_start_index(ow, output_width, input_width); int iw1 = get_end_index(ow, output_width, input_width); int kw = iw1 - iw0; if (pool_type == 1) { float sum = 0; for (int ih = ih0; ih < ih1; ih++) { for (int iw = iw0; iw < iw1; iw++) { sum += input_ptr[ih * input_width + iw]; } } output_ptr[oh * output_width + ow] = sum / kh / kw; } } static bool IsGlobalPooling(PoolingLayerParam *param, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { return param->kernels[1] == 0 && param->kernels[0] == 0; } CudaPoolingLayerAcc::~CudaPoolingLayerAcc() { cudnnDestroy(this->m_cudnn_handle); cudnnDestroyPoolingDescriptor(this->m_pooling_desc); cudnnDestroyTensorDescriptor(this->m_input_desc); cudnnDestroyTensorDescriptor(this->m_output_desc); } Status CudaPoolingLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource, const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { Status ret = CudaLayerAcc::Init(context, param, resource, inputs, outputs); if (ret != TNN_OK) { return ret; } auto params = dynamic_cast<PoolingLayerParam*>(param); if (params->pool_type == 0) { this->m_pooling_mode = CUDNN_POOLING_MAX; } else { this->m_pooling_mode = CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; } this->m_tensor_format = CUDNN_TENSOR_NCHW; this->m_data_type = CUDNN_DATA_FLOAT; cudnnCreate(&m_cudnn_handle); cudnnCreatePoolingDescriptor(&m_pooling_desc); cudnnCreateTensorDescriptor(&m_input_desc); cudnnCreateTensorDescriptor(&m_output_desc); auto input_dims = inputs[0]->GetBlobDesc().dims; auto output_dims = outputs[0]->GetBlobDesc().dims; is_global = IsGlobalPooling(params, inputs, outputs); cudnnSetPooling2dDescriptor(this->m_pooling_desc, this->m_pooling_mode, CUDNN_PROPAGATE_NAN, params->kernels[1], params->kernels[0], params->pads[2], params->pads[0], params->strides[1], params->strides[0]); cudnnSetTensor4dDescriptor(this->m_input_desc, 
this->m_tensor_format, this->m_data_type, input_dims[0], input_dims[1], input_dims[2], input_dims[3]); cudnnSetTensor4dDescriptor(this->m_output_desc, this->m_tensor_format, this->m_data_type, output_dims[0], output_dims[1], output_dims[2], output_dims[3]); return TNN_OK; } Status CudaPoolingLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto params = dynamic_cast<PoolingLayerParam *>(param_); auto input_dims = inputs[0]->GetBlobDesc().dims; auto output_dims = outputs[0]->GetBlobDesc().dims; if (is_global) { cudnnSetPooling2dDescriptor(this->m_pooling_desc, this->m_pooling_mode, CUDNN_PROPAGATE_NAN, input_dims[2], input_dims[3], params->pads[2], params->pads[0], params->strides[1], params->strides[0]); cudnnSetTensor4dDescriptor(this->m_input_desc, this->m_tensor_format, this->m_data_type, input_dims[0], input_dims[1], input_dims[2], input_dims[3]); cudnnSetTensor4dDescriptor(this->m_output_desc, this->m_tensor_format, this->m_data_type, output_dims[0], output_dims[1], output_dims[2], output_dims[3]); } return TNN_OK; } Status CudaPoolingLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) { auto param = dynamic_cast<PoolingLayerParam *>(param_); if (!param) { return Status(TNNERR_MODEL_ERR, "Error: PoolingLayerParam is nil"); } Blob *input_blob = inputs[0]; Blob *output_blob = outputs[0]; float* input_data = static_cast<float*>(input_blob->GetHandle().base); float* output_data = static_cast<float*>(output_blob->GetHandle().base); if (param->is_adaptive_pool) { auto input_dims = input_blob->GetBlobDesc().dims; auto output_dims = output_blob->GetBlobDesc().dims; bool is_1d = input_dims.size() == 3; int channels = is_1d ? input_dims[0] : input_dims[0] * input_dims[1]; int input_height = is_1d ? input_dims[1] : input_dims[2]; int input_width = is_1d ? input_dims[2] : input_dims[3]; int output_height = is_1d ? output_dims[1] : output_dims[2]; int output_width = is_1d ? output_dims[2] : output_dims[3]; int count = output_height*output_width; dim3 grid(TNN_CUDA_GET_BLOCKS(count), channels); adaptive_pooling_kernel<<<grid, TNN_CUDA_NUM_THREADS, 0, context_->GetStream()>>>( input_data, output_data, channels, input_height, input_width, output_height, output_width, param->pool_type); } else { float alpha = 1.f; float beta = 0.f; cudnnPoolingForward(this->m_cudnn_handle, this->m_pooling_desc, &alpha, m_input_desc, input_data, &beta, m_output_desc, output_data); } return TNN_OK; } REGISTER_CUDA_ACC(Pooling, LAYER_POOLING); } // namespace TNN_NS
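// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original file pair above.] The
// adaptive_pooling_kernel in both files partitions each input axis into
// output_size bins whose edges are floor(o*in/out) and ceil((o+1)*in/out);
// neighbouring bins may overlap by one element when the input size is not a
// multiple of the output size, and pool_type == 1 averages over each bin.
// The same index arithmetic run on the host with made-up sizes (in = 7,
// out = 3), just to show the bin layout:
#include <cmath>
#include <cstdio>

static int start_index(int o, int out, int in) { return (int)std::floor((float)(o * in) / out); }
static int end_index(int o, int out, int in)   { return (int)std::ceil((float)((o + 1) * in) / out); }

int main() {
  const int in = 7, out = 3;  // assumed sizes, illustration only
  for (int o = 0; o < out; ++o)
    std::printf("output %d averages input [%d, %d)\n",
                o, start_index(o, out, in), end_index(o, out, in));
  // prints [0,3), [2,5), [4,7): the bins cover the whole input, overlapping by one.
  return 0;
}
// ---------------------------------------------------------------------------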
2f9c53108d2c6bd4a6e704926ec710ed294b37be.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <complex> #include <helper_cuda.h> #include <iomanip> #include <iostream> #include <limits> #include <random> #include <cufinufft.h> #include <cufinufft/impl.h> #include <cufinufft/utils.h> #include <thrust/complex.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> using cufinufft::utils::infnorm; template <typename T> int run_test(int method, int type, int N1, int N2, int ntransf, int maxbatchsize, int M, T tol, T checktol, int iflag) { std::cout << std::scientific << std::setprecision(3); int ier; const int N = N1 * N2; printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N, ntransf, M); thrust::host_vector<T> x(M), y(M); thrust::host_vector<thrust::complex<T>> c(M * ntransf), fk(ntransf * N1 * N2); thrust::device_vector<T> d_x(M), d_y(M); thrust::device_vector<thrust::complex<T>> d_c(M * ntransf), d_fk(ntransf * N1 * N2); std::default_random_engine eng(1); std::uniform_real_distribution<T> dist11(-1, 1); auto randm11 = [&eng, &dist11]() { return dist11(eng); }; // Making data for (int i = 0; i < M; i++) { x[i] = M_PI * randm11(); // x in [-pi,pi) y[i] = M_PI * randm11(); } if (type == 1) { for (int i = 0; i < ntransf * M; i++) { c[i].real(randm11()); c[i].imag(randm11()); } } else if (type == 2) { for (int i = 0; i < ntransf * N1 * N2; i++) { fk[i].real(randm11()); fk[i].imag(randm11()); } } else { std::cerr << "Invalid type " << type << " supplied\n"; return 1; } d_x = x; d_y = y; if (type == 1) d_c = c; else if (type == 2) d_fk = fk; hipEvent_t start, stop; float milliseconds = 0; double totaltime = 0; hipEventCreate(&start); hipEventCreate(&stop); // warm up CUFFT (is slow, takes around 0.2 sec... ) hipEventRecord(start); { int nf1 = 1; hipfftHandle fftplan; hipfftPlan1d(&fftplan, nf1, cufft_type<T>(), 1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds / 1000); // now to the test... cufinufft_plan_t<T> *dplan; int dim = 2; // Here we setup our own opts, for gpu_method. 
cufinufft_opts opts; ier = cufinufft_default_opts(type, dim, &opts); if (ier != 0) { printf("err %d: cufinufft_default_opts\n", ier); return ier; } opts.gpu_method = method; opts.gpu_maxbatchsize = maxbatchsize; int nmodes[3] = {N1, N2, 1}; hipEventRecord(start); ier = cufinufft_makeplan_impl<T>(type, dim, nmodes, iflag, ntransf, tol, &dplan, &opts); if (ier != 0) { printf("err: cufinufft2d_plan\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_setpts_impl<T>(M, d_x.data().get(), d_y.data().get(), NULL, 0, NULL, NULL, NULL, dplan); if (ier != 0) { printf("err: cufinufft_setpts\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_execute_impl<T>((cuda_complex<T> *)d_c.data().get(), (cuda_complex<T> *)d_fk.data().get(), dplan); if (ier != 0) { printf("err: cufinufft2d_exec\n"); return ier; } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); float exec_ms = milliseconds; totaltime += milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds / 1000); hipEventRecord(start); ier = cufinufft_destroy_impl<T>(dplan); hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds / 1000); if (type == 1) fk = d_fk; else if (type == 2) c = d_c; T rel_error = std::numeric_limits<T>::max(); if (type == 1) { int i = ntransf - 1; // // choose some data to check int nt1 = (int)(0.37 * N1), nt2 = (int)(0.26 * N2); // choose some mode index to check thrust::complex<T> Ft = thrust::complex<T>(0, 0), J = thrust::complex<T>(0.0, iflag); for (int j = 0; j < M; ++j) Ft += c[j + i * M] * exp(J * (nt1 * x[j] + nt2 * y[j])); // crude direct int it = N1 / 2 + nt1 + N1 * (N2 / 2 + nt2); // index in complex F as 1d array rel_error = abs(Ft - fk[it + i * N]) / infnorm(N1, (std::complex<T> *)fk.data() + i * N); printf("[gpu ] %dth data one mode: rel err in F[%d,%d] is %.3g\n", i, nt1, nt2, rel_error); } else if (type == 2) { const int t = ntransf - 1; thrust::complex<T> *fkstart = fk.data() + t * N1 * N2; const thrust::complex<T> *cstart = c.data() + t * M; const int jt = M / 2; // check arbitrary choice of one targ pt const thrust::complex<T> J(0, iflag); thrust::complex<T> ct(0, 0); int m = 0; for (int m2 = -(N2 / 2); m2 <= (N2 - 1) / 2; ++m2) // loop in correct order over F for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1) ct += fkstart[m++] * exp(J * (m1 * x[jt] + m2 * y[jt])); // crude direct rel_error = abs(cstart[jt] - ct) / infnorm(M, (std::complex<T> *)c.data()); printf("[gpu ] %dth data one targ: rel err in c[%d] is %.3g\n", t, jt, rel_error); } printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime * 1000, M * ntransf / totaltime * 1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n", M * ntransf / exec_ms * 1000); return std::isnan(rel_error) || rel_error > checktol; } int main(int argc, char *argv[]) { if (argc != 11) { fprintf(stderr, "Usage: cufinufft2d1many_test method type N1 N2 ntransf maxbatchsize M tol checktol\n" "Arguments:\n" " method: One of\n" " 1: nupts driven,\n" " 2: sub-problem, or\n" " 
type: Type of transform (1, 2)\n" " N1, N2: The size of the 2D array\n" " ntransf: Number of inputs\n" " maxbatchsize: Number of simultaneous transforms (or 0 for default)\n" " M: The number of non-uniform points\n" " tol: NUFFT tolerance\n" " checktol: relative error to pass test\n" " prec: 'f' or 'd' (float/double)\n"); return 1; } const int method = atoi(argv[1]); const int type = atoi(argv[2]); const int N1 = atof(argv[3]); const int N2 = atof(argv[4]); const int ntransf = atof(argv[5]); const int maxbatchsize = atoi(argv[6]); const int M = atoi(argv[7]); const double tol = atof(argv[8]); const double checktol = atof(argv[9]); const char prec = argv[10][0]; const int iflag = 1; if (prec == 'f') return run_test<float>(method, type, N1, N2, ntransf, maxbatchsize, M, tol, checktol, iflag); else if (prec == 'd') return run_test<double>(method, type, N1, N2, ntransf, maxbatchsize, M, tol, checktol, iflag); else return -1; }
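// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original file pair above/below.] For
// type-1 transforms the test above checks a single output mode by evaluating
// the crude O(M) direct sum F[nt1,nt2] = sum_j c_j * exp(i*(nt1*x_j + nt2*y_j))
// and comparing it with the cuFINUFFT result relative to an infinity norm.
// A host-only sketch of that direct evaluation; M, nt1, nt2 and the random
// ranges here are assumptions of this sketch, not values from the test.
#include <cmath>
#include <complex>
#include <cstdio>
#include <random>
#include <vector>

int main() {
  const int M = 1000, nt1 = 3, nt2 = -2, iflag = 1;
  std::default_random_engine eng(1);
  std::uniform_real_distribution<double> dist(-M_PI, M_PI);
  std::vector<double> x(M), y(M);
  std::vector<std::complex<double>> c(M);
  for (int j = 0; j < M; ++j) { x[j] = dist(eng); y[j] = dist(eng); c[j] = {dist(eng), dist(eng)}; }
  std::complex<double> Ft(0, 0), J(0, iflag);          // iflag = +1 -> exp(+i*phase)
  for (int j = 0; j < M; ++j) Ft += c[j] * std::exp(J * (nt1 * x[j] + nt2 * y[j]));
  std::printf("direct F[%d,%d] = %g + %gi\n", nt1, nt2, Ft.real(), Ft.imag());
  return 0;
}
// ---------------------------------------------------------------------------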
2f9c53108d2c6bd4a6e704926ec710ed294b37be.cu
#include <cmath> #include <complex> #include <helper_cuda.h> #include <iomanip> #include <iostream> #include <limits> #include <random> #include <cufinufft.h> #include <cufinufft/impl.h> #include <cufinufft/utils.h> #include <thrust/complex.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> using cufinufft::utils::infnorm; template <typename T> int run_test(int method, int type, int N1, int N2, int ntransf, int maxbatchsize, int M, T tol, T checktol, int iflag) { std::cout << std::scientific << std::setprecision(3); int ier; const int N = N1 * N2; printf("#modes = %d, #inputs = %d, #NUpts = %d\n", N, ntransf, M); thrust::host_vector<T> x(M), y(M); thrust::host_vector<thrust::complex<T>> c(M * ntransf), fk(ntransf * N1 * N2); thrust::device_vector<T> d_x(M), d_y(M); thrust::device_vector<thrust::complex<T>> d_c(M * ntransf), d_fk(ntransf * N1 * N2); std::default_random_engine eng(1); std::uniform_real_distribution<T> dist11(-1, 1); auto randm11 = [&eng, &dist11]() { return dist11(eng); }; // Making data for (int i = 0; i < M; i++) { x[i] = M_PI * randm11(); // x in [-pi,pi) y[i] = M_PI * randm11(); } if (type == 1) { for (int i = 0; i < ntransf * M; i++) { c[i].real(randm11()); c[i].imag(randm11()); } } else if (type == 2) { for (int i = 0; i < ntransf * N1 * N2; i++) { fk[i].real(randm11()); fk[i].imag(randm11()); } } else { std::cerr << "Invalid type " << type << " supplied\n"; return 1; } d_x = x; d_y = y; if (type == 1) d_c = c; else if (type == 2) d_fk = fk; cudaEvent_t start, stop; float milliseconds = 0; double totaltime = 0; cudaEventCreate(&start); cudaEventCreate(&stop); // warm up CUFFT (is slow, takes around 0.2 sec... ) cudaEventRecord(start); { int nf1 = 1; cufftHandle fftplan; cufftPlan1d(&fftplan, nf1, cufft_type<T>(), 1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); printf("[time ] dummy warmup call to CUFFT\t %.3g s\n", milliseconds / 1000); // now to the test... cufinufft_plan_t<T> *dplan; int dim = 2; // Here we setup our own opts, for gpu_method. 
cufinufft_opts opts; ier = cufinufft_default_opts(type, dim, &opts); if (ier != 0) { printf("err %d: cufinufft_default_opts\n", ier); return ier; } opts.gpu_method = method; opts.gpu_maxbatchsize = maxbatchsize; int nmodes[3] = {N1, N2, 1}; cudaEventRecord(start); ier = cufinufft_makeplan_impl<T>(type, dim, nmodes, iflag, ntransf, tol, &dplan, &opts); if (ier != 0) { printf("err: cufinufft2d_plan\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft plan:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_setpts_impl<T>(M, d_x.data().get(), d_y.data().get(), NULL, 0, NULL, NULL, NULL, dplan); if (ier != 0) { printf("err: cufinufft_setpts\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft setNUpts:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_execute_impl<T>((cuda_complex<T> *)d_c.data().get(), (cuda_complex<T> *)d_fk.data().get(), dplan); if (ier != 0) { printf("err: cufinufft2d_exec\n"); return ier; } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); float exec_ms = milliseconds; totaltime += milliseconds; printf("[time ] cufinufft exec:\t\t %.3g s\n", milliseconds / 1000); cudaEventRecord(start); ier = cufinufft_destroy_impl<T>(dplan); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); totaltime += milliseconds; printf("[time ] cufinufft destroy:\t\t %.3g s\n", milliseconds / 1000); if (type == 1) fk = d_fk; else if (type == 2) c = d_c; T rel_error = std::numeric_limits<T>::max(); if (type == 1) { int i = ntransf - 1; // // choose some data to check int nt1 = (int)(0.37 * N1), nt2 = (int)(0.26 * N2); // choose some mode index to check thrust::complex<T> Ft = thrust::complex<T>(0, 0), J = thrust::complex<T>(0.0, iflag); for (int j = 0; j < M; ++j) Ft += c[j + i * M] * exp(J * (nt1 * x[j] + nt2 * y[j])); // crude direct int it = N1 / 2 + nt1 + N1 * (N2 / 2 + nt2); // index in complex F as 1d array rel_error = abs(Ft - fk[it + i * N]) / infnorm(N1, (std::complex<T> *)fk.data() + i * N); printf("[gpu ] %dth data one mode: rel err in F[%d,%d] is %.3g\n", i, nt1, nt2, rel_error); } else if (type == 2) { const int t = ntransf - 1; thrust::complex<T> *fkstart = fk.data() + t * N1 * N2; const thrust::complex<T> *cstart = c.data() + t * M; const int jt = M / 2; // check arbitrary choice of one targ pt const thrust::complex<T> J(0, iflag); thrust::complex<T> ct(0, 0); int m = 0; for (int m2 = -(N2 / 2); m2 <= (N2 - 1) / 2; ++m2) // loop in correct order over F for (int m1 = -(N1 / 2); m1 <= (N1 - 1) / 2; ++m1) ct += fkstart[m++] * exp(J * (m1 * x[jt] + m2 * y[jt])); // crude direct rel_error = abs(cstart[jt] - ct) / infnorm(M, (std::complex<T> *)c.data()); printf("[gpu ] %dth data one targ: rel err in c[%d] is %.3g\n", t, jt, rel_error); } printf("[totaltime] %.3g us, speed %.3g NUpts/s\n", totaltime * 1000, M * ntransf / totaltime * 1000); printf("\t\t\t\t\t(exec-only thoughput: %.3g NU pts/s)\n", M * ntransf / exec_ms * 1000); return std::isnan(rel_error) || rel_error > checktol; } int main(int argc, char *argv[]) { if (argc != 11) { fprintf(stderr, "Usage: cufinufft2d1many_test method type N1 N2 ntransf maxbatchsize M tol checktol\n" "Arguments:\n" " method: One of\n" " 1: nupts driven,\n" " 2: 
sub-problem, or\n" " type: Type of transform (1, 2)\n" " N1, N2: The size of the 2D array\n" " ntransf: Number of inputs\n" " maxbatchsize: Number of simultaneous transforms (or 0 for default)\n" " M: The number of non-uniform points\n" " tol: NUFFT tolerance\n" " checktol: relative error to pass test\n" " prec: 'f' or 'd' (float/double)\n"); return 1; } const int method = atoi(argv[1]); const int type = atoi(argv[2]); const int N1 = atof(argv[3]); const int N2 = atof(argv[4]); const int ntransf = atof(argv[5]); const int maxbatchsize = atoi(argv[6]); const int M = atoi(argv[7]); const double tol = atof(argv[8]); const double checktol = atof(argv[9]); const char prec = argv[10][0]; const int iflag = 1; if (prec == 'f') return run_test<float>(method, type, N1, N2, ntransf, maxbatchsize, M, tol, checktol, iflag); else if (prec == 'd') return run_test<double>(method, type, N1, N2, ntransf, maxbatchsize, M, tol, checktol, iflag); else return -1; }
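// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the original file pair above.] Both
// versions of the test time every stage (plan, setpts, exec, destroy) with
// CUDA events recorded on the default stream and summed into `totaltime`.
// The bare event-timing pattern, applied to a trivial kernel (`busy` and the
// sizes are made up for this sketch):
#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy(float *x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float *d_x = NULL;
  cudaMalloc((void **)&d_x, n * sizeof(float));
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);                   // enqueue start marker
  busy<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(stop);                    // enqueue stop marker after the kernel
  cudaEventSynchronize(stop);               // wait for the stop event to complete
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);   // elapsed GPU time in milliseconds
  std::printf("[time ] busy kernel:\t %.3g s\n", ms / 1000);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}
// ---------------------------------------------------------------------------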
fc54eead8c3e9b2d29f096f80caf93724146100f.hip
// !!! This is a file automatically generated by hipify!!! /** * 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <hip/hip_runtime.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ #ifndef NI # define NI 256 //512 #endif #ifndef NJ # define NJ 256 //512 #endif #ifndef NK # define NK 256 //512 #endif #ifndef NL # define NL 256 //512 #endif #ifndef NM # define NM 256 //512 #endif /* Thread block dimensions */ #ifndef DIM_THREAD_BLOCK_X #define DIM_THREAD_BLOCK_X 8 //32 #endif #ifndef DIM_THREAD_BLOCK_Y #define DIM_THREAD_BLOCK_Y 8 #endif /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NI; i++) { for (j=0; j < NL; j++) { if (percentDiff(G[i*NL + j], G_outputFromGpu[i*NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); hipGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == hipComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); hipSetDevice( gpgpu_device_id ); } __global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for(k=0; k < NK; k++) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if 
((i < NJ) && (j < NL)) { int k; for(k=0; k < NM; k++) { F[i * NL + j] += C[i * NM + k] * D[k * NL +j]; } } } __global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for(k=0; k < NJ; k++) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int i,j,k; /* E := A*B */ for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i*NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i*NL + j] = 0; for (k = 0; k < NM; ++k) { F[i*NL + j] += C[i*NM + k] * D[k*NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i*NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i*NL + j] += E[i*NJ + k] * F[k*NL + j]; } } } } void mm3Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* F, DATA_TYPE* G, DATA_TYPE* G_outputFromGpu) { hipError_t error; double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE *D_gpu; DATA_TYPE *E_gpu; DATA_TYPE *F_gpu; DATA_TYPE *G_gpu; error=hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice); if (error != hipSuccess) { 
printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=hipMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); // t_start = rtclock(); hipLaunchKernelGGL(( mm3_kernel1), dim3(grid1),dim3(block), 0, 0, A_gpu, B_gpu, E_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( mm3_kernel2), dim3(grid2),dim3(block), 0, 0, C_gpu, D_gpu, F_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( mm3_kernel3), dim3(grid3),dim3(block), 0, 0, E_gpu, F_gpu, G_gpu); hipDeviceSynchronize(); // t_end = rtclock(); error=hipMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); hipFree(D_gpu); hipFree(E_gpu); hipFree(F_gpu); hipFree(G_gpu); } int main(int argc, char** argv) { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* F; DATA_TYPE* G; DATA_TYPE* G_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. 
*/ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NJ*NM*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NM*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); F = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); G = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); srand(1); init_array(A, B, C, D); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm3Cuda(A, B, C, D, E, F, G, G_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_array(A, B, C, D); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm3_cpu(A, B, C, D, E, F, G); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(G, G_outputFromGpu); free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
fc54eead8c3e9b2d29f096f80caf93724146100f.cu
/** * 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU * * Updated by Grigori Fursin (http://cTuning.org/lab/people/gfursin) * to work with Collective Mind, OpenME plugin interface and * Collective Knowledge Frameworks for automatic, machine-learning based * and collective tuning and data mining: http://cTuning.org * */ #ifndef WINDOWS #include <unistd.h> #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <cuda.h> #include "polybench.h" #ifdef OPENME #include <openme.h> #endif #ifdef XOPENME #include <xopenme.h> #endif #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ #ifndef NI # define NI 256 //512 #endif #ifndef NJ # define NJ 256 //512 #endif #ifndef NK # define NK 256 //512 #endif #ifndef NL # define NL 256 //512 #endif #ifndef NM # define NM 256 //512 #endif /* Thread block dimensions */ #ifndef DIM_THREAD_BLOCK_X #define DIM_THREAD_BLOCK_X 8 //32 #endif #ifndef DIM_THREAD_BLOCK_Y #define DIM_THREAD_BLOCK_Y 8 #endif /* Can switch DATA_TYPE between float and double */ # ifndef DATA_TYPE # define DATA_TYPE float # endif void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NI; i++) { for (j=0; j < NL; j++) { if (percentDiff(G[i*NL + j], G_outputFromGpu[i*NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { /* Grigori Fursin added support for CK widgets */ int gpgpu_device_id=GPU_DEVICE; int devID = 0; cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (getenv("CK_COMPUTE_DEVICE_ID")!=NULL) gpgpu_device_id=atol(getenv("CK_COMPUTE_DEVICE_ID")); cudaGetDeviceProperties(&deviceProp, gpgpu_device_id); if (deviceProp.computeMode == cudaComputeModeProhibited) { printf("Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); else printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); cudaSetDevice( gpgpu_device_id ); } __global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for(k=0; k < NK; k++) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NJ) && (j < NL)) { int k; for(k=0; k < NM; k++) { F[i * NL 
+ j] += C[i * NM + k] * D[k * NL +j]; } } } __global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for(k=0; k < NJ; k++) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int i,j,k; /* E := A*B */ for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i*NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i*NL + j] = 0; for (k = 0; k < NM; ++k) { F[i*NL + j] += C[i*NM + k] * D[k*NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i*NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i*NL + j] += E[i*NJ + k] * F[k*NL + j]; } } } } void mm3Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* F, DATA_TYPE* G, DATA_TYPE* G_outputFromGpu) { cudaError_t error; double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE *D_gpu; DATA_TYPE *E_gpu; DATA_TYPE *F_gpu; DATA_TYPE *G_gpu; error=cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A 
returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error=cudaMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); // t_start = rtclock(); mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu); cudaThreadSynchronize(); mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu); cudaThreadSynchronize(); mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu); cudaThreadSynchronize(); // t_end = rtclock(); error=cudaMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); cudaFree(D_gpu); cudaFree(E_gpu); cudaFree(F_gpu); cudaFree(G_gpu); } int main(int argc, char** argv) { /* Prepare ctuning vars */ long ct_repeat=0; long ct_repeat_max=1; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* F; DATA_TYPE* G; DATA_TYPE* G_outputFromGpu; #ifdef XOPENME xopenme_init(2,0); #endif #ifdef OPENME openme_init(NULL,NULL,NULL,0); openme_callback("PROGRAM_START", NULL); #endif /* Run kernel. */ if (getenv("CT_REPEAT_MAIN")!=NULL) ct_repeat_max=atol(getenv("CT_REPEAT_MAIN")); A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NJ*NM*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NM*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); F = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); G = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); srand(1); init_array(A, B, C, D); GPU_argv_init(); #ifdef OPENME openme_callback("ACC_KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(0); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm3Cuda(A, B, C, D, E, F, G, G_outputFromGpu); } #ifdef XOPENME xopenme_clock_end(0); #endif #ifdef OPENME openme_callback("ACC_KERNEL_END", NULL); #endif /* srand(1); init_array(A, B, C, D); #ifdef OPENME openme_callback("KERNEL_START", NULL); #endif #ifdef XOPENME xopenme_clock_start(1); #endif for (ct_repeat=0; ct_repeat<ct_repeat_max; ct_repeat++) { mm3_cpu(A, B, C, D, E, F, G); } #ifdef XOPENME xopenme_clock_end(1); #endif #ifdef OPENME openme_callback("KERNEL_END", NULL); #endif */ compareResults(G, G_outputFromGpu); free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); #ifdef XOPENME xopenme_dump_state(); xopenme_finish(); #endif #ifdef OPENME openme_callback("PROGRAM_END", NULL); #endif return 0; }
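Every allocation and copy in mm3Cuda above checks the returned cudaError_t, but the messages were copy-pasted from the first call and always report "cudaMalloc d_A", no matter which buffer or which cudaMemcpy actually failed. A small checking macro is the usual way to keep such diagnostics accurate; the sketch below is illustrative and not part of the PolyBench sources (the name CHECK_CUDA is made up here).

#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* Reports the exact failing expression, the CUDA error string, and the line number. */
#define CHECK_CUDA(expr)                                                   \
  do {                                                                     \
    cudaError_t err_ = (expr);                                             \
    if (err_ != cudaSuccess) {                                             \
      fprintf(stderr, "%s failed: %s (line %d)\n",                         \
              #expr, cudaGetErrorString(err_), __LINE__);                  \
      exit(EXIT_FAILURE);                                                  \
    }                                                                      \
  } while (0)

/* Example use with the buffers from mm3Cuda:
     CHECK_CUDA(cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ));
     CHECK_CUDA(cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice));
*/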
68c1e22f84de3492f5ef10d0b5f25e5f59412ee1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <numeric> #include <map> #include <sys/time.h> #include <valarray> #include <hdf5.h> #include "range.hpp" #include "utils.hpp" #define NUM_ROWS 28 #define NUM_COLS 28 #define NUM_CHANNELS 1 #define NUM_DIGITS 10 #define TILE_WIDTH 16 #define MAX_WIDTH 1024 #define POOL_SIZE 2 // matrix tile set to 25 to make offset calculation easier #define CONV_TILE 25 static int FLAGS_batch_size = 10000; static std::string FLAGS_testdata{}; static std::string FLAGS_model{}; // Data and reference data dimensions static int xdims[] = {FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS}; static int rdims[] = {FLAGS_batch_size, NUM_DIGITS}; // Model dimensions static int conv1dims[] = {5, 5, 1, 32}; static int conv2dims[] = {5, 5, 32, 64}; static int fc1dims[] = {1024, 128}; static int fc2dims[] = {128, 10}; // GPU functions __global__ void convLayerForwardBasicKernel(float * X, float * W, float * Y, int W_grid, int input_wid, int output_wid, int mask_wid, int numInput, int numOutput) { int output_num = blockIdx.y; int input_num = blockIdx.x; int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; //h tile index int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x; // w tiles index if ((h < output_wid) && (w < output_wid)){ float acc = 0.0f; for (int c = 0; c < numInput; c++) { // input features for (int p = 0; p < mask_wid; p++) { //index in tile height in ouput feature for (int q = 0; q < mask_wid; q ++) { //index in tile width in ouput feature acc += (X[((input_num * input_wid + (h + p)) * input_wid + (w + q)) * numInput + c] * W[((p * mask_wid + q) * numInput + c) * numOutput + output_num]); } } } Y[((input_num * output_wid + h) * output_wid + w) * numOutput + output_num] = acc; } } // This function uses matrix multiply to calculate convolution, it transform the input matrix // and load that to shared memory on the fly // We also combined relu to the writeback part of kernel __global__ void convLayerForwardMatrixKernel(float * X, float * W, float * Y, int input_wid, int output_wid, int numInput, int numOutput) { __shared__ float W_tile[CONV_TILE][25]; __shared__ float X_tile[25][CONV_TILE]; int bx = blockIdx.x; // test case number int by = blockIdx.y; // output feature map number (by*CONV_TILE+ty) int bz = blockIdx.z; // output feature map dimension (bz*CONV_TILE+tz) int tx=threadIdx.x; // output feature map number int ty=threadIdx.y; // output feature map dimension // if tx and ty are within range int i,j; float sum=0.0f; int w_index,x_index; // offset doesn't change during iteration so calculate them in advance int x_offset=(bx*input_wid*input_wid+((bz*CONV_TILE+ty)/output_wid+tx/5)*input_wid+((bz*CONV_TILE+ty)%output_wid)+tx%5)*numInput; int w_offset=ty*numInput*numOutput+by*CONV_TILE+tx; for (i=0;i<numInput;i++){ // get indexes w_index=w_offset+i*numOutput; x_index=x_offset+i; __syncthreads(); if ((tx+by*CONV_TILE)<numOutput) W_tile[tx][ty]=W[w_index]; else W_tile[tx][ty]=0.0f; if ((ty+bz*CONV_TILE)<(output_wid*output_wid)) X_tile[tx][ty]=X[x_index]; else X_tile[tx][ty]=0.0f; __syncthreads(); // matrix multiply for (j=0;j<CONV_TILE;j++){ sum+=(W_tile[tx][j]*X_tile[j][ty]); } } __syncthreads(); // output format is the same as input, not the expanded matrix format if (((tx+by*CONV_TILE)<numOutput)&&(((ty+bz*CONV_TILE)/output_wid)<output_wid)){ int y_offset=(bx*output_wid*output_wid+bz*CONV_TILE+ty)*numOutput+(tx+by*CONV_TILE); 
if (sum<0.0) sum=0.0f; Y[y_offset]=sum; } } __global__ void averagePool (float * X, float * Y, int W_grid, int input_wid, int output_wid, int numInput) { int output_num = blockIdx.y; int input_num = blockIdx.x; int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; //h tile index int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x; // w tiles index if ((h < output_wid) && (w < output_wid)){ float sum=0.0f; int yoffset = ((input_num * output_wid + h) * output_wid + w) * numInput + output_num; for (int p = 0; p < POOL_SIZE; p++) { //index in tile height in ouput feature for (int q = 0; q < POOL_SIZE; q ++) { //index in tile width in ouput feature sum += X[((input_num * input_wid + POOL_SIZE * h + p)* input_wid + POOL_SIZE * w + q) * numInput + output_num]; } } Y[yoffset]=sum/4.0; } } __global__ void gpu_relu4 (float * X, int total) { int X_idx = blockDim.x * blockIdx.x + threadIdx.x; if (X_idx < total){ if (X[X_idx]<0.0) X[X_idx]=0.0f; // X[X_idx] = (X[X_idx] < 0) ? 0 : X[X_idx]; } } // NN using shared memory __global__ void gpu_fully_forward(float *X, float *W, float *Y, int output_size, int input_size){ __shared__ float datain[1024]; int tx=threadIdx.x; int bx=blockIdx.x; float tmp; // load data in // eliminate relu2 by checking if input is less than 0 if (tx<input_size) tmp=X[bx*blockDim.x+tx]; if (tmp<0) tmp=0.0f; datain[tx]=tmp; __syncthreads(); // calculate result if (tx<output_size){ int i; float sum=0.0f; for (i=0;i<input_size;i++) sum+=datain[i]*W[i*output_size+tx]; Y[bx*output_size+tx]=sum; } } static int loadData(float *x, float *y) { // Open the data file const auto file_id = H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); // Open the dataset x and y const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT); const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT); // Get the dataset x dimensions const auto xspace = H5Dget_space(x_id); const auto xndims = H5Sget_simple_extent_ndims(xspace); assert(xndims == 4); hsize_t input_dims[xndims]; H5Sget_simple_extent_dims(xspace, input_dims, NULL); if (input_dims[0] != FLAGS_batch_size) { std::cout << "data size does not match batch size specified!\n"; return 1; // return error } std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1] << " x " << input_dims[2] << " x " << input_dims[3] << "\n"; // Read the dataset x and y check_success( H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x)); check_success( H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y)); // Close the dataset x and y check_success(H5Dclose(x_id)); check_success(H5Dclose(y_id)); // Close the file check_success(H5Fclose(file_id)); // return success return 0; } static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) { // Open the model file const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); // Open the dataset const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT); const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT); const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT); const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT); // Read the dataset check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, conv1)); check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, conv2)); check_success( H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1)); check_success( H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2)); // Close the dataset x and y 
check_success(H5Dclose(conv1_id)); check_success(H5Dclose(conv2_id)); check_success(H5Dclose(fc1_id)); check_success(H5Dclose(fc2_id)); // Close the file check_success(H5Fclose(file_id)); } // From book chapter Figure 16.4 static void conv_forward_valid(const float *X, const int xdims[4], const float *W, const int wdims[4], float *Y, const int ydims[4]) { const auto filter_h = wdims[0]; const auto filter_w = wdims[1]; const auto in_channel = wdims[2]; for (const auto i : range(0, ydims[0])) { //number of input feature maps for (const auto m : range(0, ydims[3])) { // number of output feature maps for (const auto h : range(0, ydims[1])) { // image width for (const auto w : range(0, ydims[2])) { // image height for (const auto p : range(0, filter_h)) { // filter height for (const auto q : range(0, filter_w)) { // filter width for (const auto c : range(0, in_channel)) { // number of filters const auto yoffset = ((i * ydims[1] + h) * ydims[2] + w) * ydims[3] + m; const auto xoffset = i * xdims[1] * xdims[2] * xdims[3] + (h + p) * xdims[2] * xdims[3] + (w + q) * xdims[3] + c; const auto woffset = p * wdims[1] * wdims[2] * wdims[3] + q * wdims[2] * wdims[3] + c * wdims[3] + m; Y[yoffset] += X[xoffset] * W[woffset]; } } } } } } } } // Recified linear unit 4d static void relu4(float *X, const int xdims[4]) { for (const auto i : range(0, xdims[0] * xdims[1] * xdims[2] * xdims[3])) { X[i] = (X[i] < 0) ? 0 : X[i]; } } // Recified linear unit 2d static void relu2(float *X, const int xdims[2]) { for (const auto i : range(0, xdims[0] * xdims[1])) { X[i] = (X[i] < 0) ? 0 : X[i]; } } // From book chapter Figure 16.5 static void average_pool(const float *X, const int xdims[4], const int pool_size, float *Y, const int ydims[4]) { for (const auto i : range(0, ydims[0])) { for (const auto m : range(0, ydims[3])) { for (const auto w : range(0, ydims[2])) { for (const auto h : range(0, ydims[1])) { for (const auto p : range(0, pool_size)) { for (const auto q : range(0, pool_size)) { const auto yoffset = ((i * ydims[1] + h) * ydims[2] + w) * ydims[3] + m; const auto xoffset = i * xdims[1] * xdims[2] * xdims[3] + (pool_size * h + p) * xdims[2] * xdims[3] + (pool_size * w + q) * xdims[3] + m; Y[yoffset] += X[xoffset] / (1.0f * pool_size * pool_size); } } } } } } } static void fully_forward(const float *X, const int xdims[2], float *W, const int wdims[2], float *Y, const int ydims[2]) { for (const auto i : range(0, xdims[0])) { for (const auto j : range(0, wdims[1])) { float sum = 0; for (const auto k : range(0, xdims[1])) { sum += X[i * xdims[1] + k] * W[k * wdims[1] + j]; } Y[i * wdims[1] + j] = sum; } } } // Choose the guess with largest score static void argmax(const float *X, const int xdims[2], int *Y) { for (const auto i : range(0, xdims[0])) { auto max_idx = 0; auto max = X[i * xdims[1]]; for (const auto j : range(0, xdims[1])) { const auto elem = X[(i * xdims[1]) + j]; if (elem > max) { max_idx = j; max = elem; } } Y[i] = max_idx; } } // Forward operation for the CNN, a combination of conv layer + average pooling // + relu void forward_operation(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out) { // conv layer const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]}; auto a = zeros<float>(adims); conv_forward_valid(x, xdims, conv1, conv1dims, a, adims); // int i,j,k,l; /// relu layer relu4(a, adims); // average pooling const int pool_size = 2; const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]}; auto b 
= zeros<float>(bdims); average_pool(a, adims, pool_size, b, bdims); // // for (i=0;i<10;i++){ // for (j=0;j<24;j++){ // for (k=0;k<24;k++){ // for (l=0;l<32;l++) // printf("%.4f ",a[i*32*24*24+j*32*24+k*32+l]); // printf("\n"); // } // printf("\n"); // } // printf("\n\n"); // } // conv layer const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]}; auto c = zeros<float>(cdims); conv_forward_valid(b, bdims, conv2, conv2dims, c, cdims); // relu relu4(c, cdims); // average pooling const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]}; auto d = zeros<float>(ddims); average_pool(c, cdims, pool_size, d, ddims); // reshape const int ddims2[] = {ddims[0], ddims[1] * ddims[2] * ddims[3]}; // matrix multiplication const int edims[] = {ddims[0], fc1dims[1]}; auto e = zeros<float>(edims); fully_forward(d, ddims2, fc1, fc1dims, e, edims); // relu relu2(e, edims); // matrix multiplication const int fdims[] = {edims[0], fc2dims[1]}; auto f = zeros<float>(fdims); fully_forward(e, edims, fc2, fc2dims, f, fdims); // for (i=0;i<10;i++){ // for (j=0;j<10;j++){ // printf("%.4f ",f[i*10+j]); // } // printf("\n"); // } argmax(f, fdims, out); delete[] a; delete[] b; delete[] c; delete[] d; delete[] e; delete[] f; } // Forward operation for the CNN, a combination of conv layer + average pooling // + relu void forward_operation_gpu(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out) { // conv layer float *conv1_input; float *conv1_output; float *conv2_input; float *conv2_output; float *W1; float *W2; float *NN_L1_input; float *NN_L2_input; float *NN_output_gpu; float *NN_L1_weights; float *NN_L2_weights; int argdim[2]={xdims[0],fc2dims[1]}; float *argmax_input=zeros<float>(argdim); int x1dim[4]={xdims[0],xdims[1],xdims[2],xdims[3]}; int y1dim[4]={xdims[0],xdims[1]-conv1dims[0]+1,xdims[2]-conv1dims[1]+1,conv1dims[3]}; int x2dim[4]={xdims[0],y1dim[1]/2,y1dim[2]/2,y1dim[3]}; int y2dim[4]={xdims[0],x2dim[1]-conv2dims[0]+1,x2dim[2]-conv2dims[1]+1,conv2dims[3]}; int NN_1_dim[4]={xdims[0],y2dim[1]/2,y2dim[2]/2,y2dim[3]}; int NN_2_dim[2]={xdims[0],fc1dims[1]}; // allocate global memory check_success(hipMalloc(&conv1_input,sizeof(float)*x1dim[0]*x1dim[1]*x1dim[2]*x1dim[3])); check_success(hipMalloc(&conv1_output,sizeof(float)*y1dim[0]*y1dim[1]*y1dim[2]*y1dim[3])); check_success(hipMalloc(&conv2_input,sizeof(float)*x2dim[0]*x2dim[1]*x2dim[2]*x2dim[3])); check_success(hipMalloc(&conv2_output,sizeof(float)*y2dim[0]*y2dim[1]*y2dim[2]*y2dim[3])); check_success(hipMalloc(&W1,sizeof(float)*conv1dims[0]*conv1dims[1]*conv1dims[2]*conv1dims[3])); check_success(hipMalloc(&W2,sizeof(float)*conv2dims[0]*conv2dims[1]*conv2dims[2]*conv2dims[3])); check_success(hipMalloc(&NN_L1_input,sizeof(float)*NN_1_dim[0]*NN_1_dim[1]*NN_1_dim[2]*NN_1_dim[3])); check_success(hipMalloc(&NN_L2_input,sizeof(float)*NN_2_dim[0]*NN_2_dim[1])); check_success(hipMalloc(&NN_L1_weights,sizeof(float)*fc1dims[0]*fc1dims[1])); check_success(hipMalloc(&NN_L2_weights,sizeof(float)*fc2dims[0]*fc2dims[1])); check_success(hipMalloc(&NN_output_gpu,sizeof(float)*argdim[0]*argdim[1])); check_success(hipMemcpy(conv1_input,x,sizeof(float)*x1dim[0]*x1dim[1]*x1dim[2]*x1dim[3],hipMemcpyHostToDevice)); check_success(hipMemcpy(W2,conv2,sizeof(float)*conv2dims[0]*conv2dims[1]*conv2dims[2]*conv2dims[3],hipMemcpyHostToDevice)); check_success(hipMemcpy(W1,conv1,sizeof(float)*conv1dims[0]*conv1dims[1]*conv1dims[2]*conv1dims[3],hipMemcpyHostToDevice)); 
check_success(hipMemcpy(NN_L1_weights,fc1,sizeof(float)*fc1dims[0]*fc1dims[1],hipMemcpyHostToDevice)); check_success(hipMemcpy(NN_L2_weights,fc2,sizeof(float)*fc2dims[0]*fc2dims[1],hipMemcpyHostToDevice)); int Y=y1dim[1]*y1dim[2]/CONV_TILE; if ((y1dim[1]*y1dim[2])%CONV_TILE) Y++; int X=y1dim[3]/CONV_TILE; if (y1dim[3]%CONV_TILE) X++; int W_grid=0; dim3 conv1_block (CONV_TILE, CONV_TILE,1); dim3 conv1_grid (xdims[0],X,Y); hipLaunchKernelGGL(( convLayerForwardMatrixKernel), dim3(conv1_grid),dim3(conv1_block), 0, 0, conv1_input,W1,conv1_output,x1dim[1],y1dim[1],x1dim[3],y1dim[3]); // average pool W_grid = x2dim[1] / TILE_WIDTH; int H_grid = x2dim[2] / TILE_WIDTH; if (y1dim[1]%TILE_WIDTH){ W_grid++; H_grid++; } Y = H_grid * W_grid; dim3 avg_pool_1_block (TILE_WIDTH, TILE_WIDTH,1); dim3 avg_pool_1_grid (xdims[0], x2dim[3], Y); hipLaunchKernelGGL(( averagePool), dim3(avg_pool_1_grid),dim3(avg_pool_1_block), 0, 0, conv1_output,conv2_input,W_grid,y1dim[1],x2dim[1],y1dim[3]); // second conv Y=y2dim[1]*y2dim[2]/CONV_TILE; if ((y2dim[1]*y2dim[2])%CONV_TILE) Y++; X=y2dim[3]/CONV_TILE; if (y2dim[3]%CONV_TILE) X++; dim3 conv2_block (CONV_TILE, CONV_TILE,1); dim3 conv2_grid (xdims[0],X,Y); hipLaunchKernelGGL(( convLayerForwardMatrixKernel), dim3(conv2_grid),dim3(conv2_block), 0, 0, conv2_input,W2,conv2_output,x2dim[1],y2dim[1],x2dim[3],y2dim[3]); // average pooling W_grid = NN_1_dim[1] / TILE_WIDTH; H_grid = NN_1_dim[2] / TILE_WIDTH; if (y1dim[1]%TILE_WIDTH){ W_grid++; H_grid++; } Y = H_grid * W_grid; dim3 avg_pool_2_block (TILE_WIDTH, TILE_WIDTH,1); dim3 avg_pool_2_grid (xdims[0], NN_1_dim[3], Y); hipLaunchKernelGGL(( averagePool), dim3(avg_pool_2_grid),dim3(avg_pool_2_block), 0, 0, conv2_output,NN_L1_input,W_grid,y2dim[1],NN_1_dim[1],y2dim[3]); dim3 fully_forward_1_grid(xdims[0],1,1); dim3 fully_forward_1_block(fc1dims[0],1,1); hipLaunchKernelGGL(( gpu_fully_forward), dim3(fully_forward_1_grid),dim3(fully_forward_1_block), 0, 0, NN_L1_input,NN_L1_weights,NN_L2_input,fc1dims[1],fc1dims[0]); dim3 fully_forward_2_grid(xdims[0],1,1); dim3 fully_forward_2_block(fc2dims[0],1,1); hipLaunchKernelGGL(( gpu_fully_forward), dim3(fully_forward_2_grid),dim3(fully_forward_2_block), 0, 0, NN_L2_input,NN_L2_weights,NN_output_gpu,fc2dims[1],fc2dims[0]); check_success(hipMemcpy(argmax_input,NN_output_gpu,sizeof(float)*xdims[0]*fc2dims[1],hipMemcpyDeviceToHost)); hipDeviceSynchronize(); check_success(hipFree(conv1_input)); check_success(hipFree(conv1_output)); check_success(hipFree(conv2_input)); check_success(hipFree(conv2_output)); check_success(hipFree(W1)); check_success(hipFree(W2)); check_success(hipFree(NN_L1_input)); check_success(hipFree(NN_L2_input)); check_success(hipFree(NN_L1_weights)); check_success(hipFree(NN_L2_weights)); check_success(hipFree(NN_output_gpu)); const int fdims[] = {xdims[0], fc2dims[1]}; argmax(argmax_input, fdims, out); free(argmax_input); } int main(int argc, char **argv) { if (argc != 3 && argc != 4) { std::cerr << "\n" << "This program performs the forward opertion step for " "Convolutional Neural Network(CNN). 
" "Sample usage: \n" << argv[0] << " [../data/test10.hdf5] [../data/model.hdf5] [10]\n"; return -1; } FLAGS_testdata = std::string(argv[1]); FLAGS_model = std::string(argv[2]); if (argc == 3) { const std::map<std::string, int> default_batch_sizes{ {"../data/test2.hdf5", 2}, {"../data/test10.hdf5", 10}, {"../data/test100.hdf5", 100}, {"../data/testfull.hdf5", 10000}}; const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata); if (batch_size_in_map == default_batch_sizes.end()) { std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n"; return -1; } FLAGS_batch_size = batch_size_in_map->second; } else if (argc == 4) { FLAGS_batch_size = atoi(argv[3]); } xdims[0] = FLAGS_batch_size; rdims[0] = FLAGS_batch_size; // Load data into x and y float *x = allocate<float>(xdims); float *y = allocate<float>(rdims); loadData(x, y); // Load model float *conv1 = allocate<float>(conv1dims); float *conv2 = allocate<float>(conv2dims); float *fc1 = allocate<float>(fc1dims); float *fc2 = allocate<float>(fc2dims); loadModel(conv1, conv2, fc1, fc2); // Perform foward opertion int *out = zeros<int>(FLAGS_batch_size); // get start time const auto start = now(); forward_operation_gpu(x, conv1, conv2, fc1, fc2, out); // get end time const auto end = now(); // get elapsed time in milliseconds const auto elapsed = std::chrono::duration<double, std::milli>(end - start).count(); // Get reference int *ref = zeros<int>(FLAGS_batch_size); argmax(y, rdims, ref); // Calculate correctness int num_correct = 0; for (const auto i : range(0, FLAGS_batch_size)) { if (out[i] == ref[i]) { num_correct++; } } std::cout << "Done with " << FLAGS_batch_size << " queries in " << "elapsed = " << elapsed << " milliseconds. Correctness: " << static_cast<float>(num_correct) / FLAGS_batch_size << "\n"; delete[] x; delete[] y; delete[] conv1; delete[] conv2; delete[] fc1; delete[] fc2; delete[] out; delete[] ref; return 0; }
68c1e22f84de3492f5ef10d0b5f25e5f59412ee1.cu
#include <algorithm> #include <cassert> #include <cstddef> #include <iostream> #include <numeric> #include <map> #include <sys/time.h> #include <valarray> #include <hdf5.h> #include "range.hpp" #include "utils.hpp" #define NUM_ROWS 28 #define NUM_COLS 28 #define NUM_CHANNELS 1 #define NUM_DIGITS 10 #define TILE_WIDTH 16 #define MAX_WIDTH 1024 #define POOL_SIZE 2 // matrix tile set to 25 to make offset calculation easier #define CONV_TILE 25 static int FLAGS_batch_size = 10000; static std::string FLAGS_testdata{}; static std::string FLAGS_model{}; // Data and reference data dimensions static int xdims[] = {FLAGS_batch_size, NUM_ROWS, NUM_COLS, NUM_CHANNELS}; static int rdims[] = {FLAGS_batch_size, NUM_DIGITS}; // Model dimensions static int conv1dims[] = {5, 5, 1, 32}; static int conv2dims[] = {5, 5, 32, 64}; static int fc1dims[] = {1024, 128}; static int fc2dims[] = {128, 10}; // GPU functions __global__ void convLayerForwardBasicKernel(float * X, float * W, float * Y, int W_grid, int input_wid, int output_wid, int mask_wid, int numInput, int numOutput) { int output_num = blockIdx.y; int input_num = blockIdx.x; int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; //h tile index int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x; // w tiles index if ((h < output_wid) && (w < output_wid)){ float acc = 0.0f; for (int c = 0; c < numInput; c++) { // input features for (int p = 0; p < mask_wid; p++) { //index in tile height in ouput feature for (int q = 0; q < mask_wid; q ++) { //index in tile width in ouput feature acc += (X[((input_num * input_wid + (h + p)) * input_wid + (w + q)) * numInput + c] * W[((p * mask_wid + q) * numInput + c) * numOutput + output_num]); } } } Y[((input_num * output_wid + h) * output_wid + w) * numOutput + output_num] = acc; } } // This function uses matrix multiply to calculate convolution, it transform the input matrix // and load that to shared memory on the fly // We also combined relu to the writeback part of kernel __global__ void convLayerForwardMatrixKernel(float * X, float * W, float * Y, int input_wid, int output_wid, int numInput, int numOutput) { __shared__ float W_tile[CONV_TILE][25]; __shared__ float X_tile[25][CONV_TILE]; int bx = blockIdx.x; // test case number int by = blockIdx.y; // output feature map number (by*CONV_TILE+ty) int bz = blockIdx.z; // output feature map dimension (bz*CONV_TILE+tz) int tx=threadIdx.x; // output feature map number int ty=threadIdx.y; // output feature map dimension // if tx and ty are within range int i,j; float sum=0.0f; int w_index,x_index; // offset doesn't change during iteration so calculate them in advance int x_offset=(bx*input_wid*input_wid+((bz*CONV_TILE+ty)/output_wid+tx/5)*input_wid+((bz*CONV_TILE+ty)%output_wid)+tx%5)*numInput; int w_offset=ty*numInput*numOutput+by*CONV_TILE+tx; for (i=0;i<numInput;i++){ // get indexes w_index=w_offset+i*numOutput; x_index=x_offset+i; __syncthreads(); if ((tx+by*CONV_TILE)<numOutput) W_tile[tx][ty]=W[w_index]; else W_tile[tx][ty]=0.0f; if ((ty+bz*CONV_TILE)<(output_wid*output_wid)) X_tile[tx][ty]=X[x_index]; else X_tile[tx][ty]=0.0f; __syncthreads(); // matrix multiply for (j=0;j<CONV_TILE;j++){ sum+=(W_tile[tx][j]*X_tile[j][ty]); } } __syncthreads(); // output format is the same as input, not the expanded matrix format if (((tx+by*CONV_TILE)<numOutput)&&(((ty+bz*CONV_TILE)/output_wid)<output_wid)){ int y_offset=(bx*output_wid*output_wid+bz*CONV_TILE+ty)*numOutput+(tx+by*CONV_TILE); if (sum<0.0) sum=0.0f; Y[y_offset]=sum; } } __global__ void averagePool (float * X, 
float * Y, int W_grid, int input_wid, int output_wid, int numInput) { int output_num = blockIdx.y; int input_num = blockIdx.x; int h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y; //h tile index int w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x; // w tiles index if ((h < output_wid) && (w < output_wid)){ float sum=0.0f; int yoffset = ((input_num * output_wid + h) * output_wid + w) * numInput + output_num; for (int p = 0; p < POOL_SIZE; p++) { //index in tile height in ouput feature for (int q = 0; q < POOL_SIZE; q ++) { //index in tile width in ouput feature sum += X[((input_num * input_wid + POOL_SIZE * h + p)* input_wid + POOL_SIZE * w + q) * numInput + output_num]; } } Y[yoffset]=sum/4.0; } } __global__ void gpu_relu4 (float * X, int total) { int X_idx = blockDim.x * blockIdx.x + threadIdx.x; if (X_idx < total){ if (X[X_idx]<0.0) X[X_idx]=0.0f; // X[X_idx] = (X[X_idx] < 0) ? 0 : X[X_idx]; } } // NN using shared memory __global__ void gpu_fully_forward(float *X, float *W, float *Y, int output_size, int input_size){ __shared__ float datain[1024]; int tx=threadIdx.x; int bx=blockIdx.x; float tmp; // load data in // eliminate relu2 by checking if input is less than 0 if (tx<input_size) tmp=X[bx*blockDim.x+tx]; if (tmp<0) tmp=0.0f; datain[tx]=tmp; __syncthreads(); // calculate result if (tx<output_size){ int i; float sum=0.0f; for (i=0;i<input_size;i++) sum+=datain[i]*W[i*output_size+tx]; Y[bx*output_size+tx]=sum; } } static int loadData(float *x, float *y) { // Open the data file const auto file_id = H5Fopen(FLAGS_testdata.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); // Open the dataset x and y const auto x_id = H5Dopen2(file_id, "/x", H5P_DEFAULT); const auto y_id = H5Dopen2(file_id, "/y", H5P_DEFAULT); // Get the dataset x dimensions const auto xspace = H5Dget_space(x_id); const auto xndims = H5Sget_simple_extent_ndims(xspace); assert(xndims == 4); hsize_t input_dims[xndims]; H5Sget_simple_extent_dims(xspace, input_dims, NULL); if (input_dims[0] != FLAGS_batch_size) { std::cout << "data size does not match batch size specified!\n"; return 1; // return error } std::cout << "input dimensions = " << input_dims[0] << " x " << input_dims[1] << " x " << input_dims[2] << " x " << input_dims[3] << "\n"; // Read the dataset x and y check_success( H5Dread(x_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, x)); check_success( H5Dread(y_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, y)); // Close the dataset x and y check_success(H5Dclose(x_id)); check_success(H5Dclose(y_id)); // Close the file check_success(H5Fclose(file_id)); // return success return 0; } static void loadModel(float *conv1, float *conv2, float *fc1, float *fc2) { // Open the model file const auto file_id = H5Fopen(FLAGS_model.c_str(), H5F_ACC_RDWR, H5P_DEFAULT); // Open the dataset const auto conv1_id = H5Dopen2(file_id, "/conv1", H5P_DEFAULT); const auto conv2_id = H5Dopen2(file_id, "/conv2", H5P_DEFAULT); const auto fc1_id = H5Dopen2(file_id, "/fc1", H5P_DEFAULT); const auto fc2_id = H5Dopen2(file_id, "/fc2", H5P_DEFAULT); // Read the dataset check_success(H5Dread(conv1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, conv1)); check_success(H5Dread(conv2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, conv2)); check_success( H5Dread(fc1_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc1)); check_success( H5Dread(fc2_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, fc2)); // Close the dataset x and y check_success(H5Dclose(conv1_id)); check_success(H5Dclose(conv2_id)); check_success(H5Dclose(fc1_id)); 
check_success(H5Dclose(fc2_id)); // Close the file check_success(H5Fclose(file_id)); } // From book chapter Figure 16.4 static void conv_forward_valid(const float *X, const int xdims[4], const float *W, const int wdims[4], float *Y, const int ydims[4]) { const auto filter_h = wdims[0]; const auto filter_w = wdims[1]; const auto in_channel = wdims[2]; for (const auto i : range(0, ydims[0])) { //number of input feature maps for (const auto m : range(0, ydims[3])) { // number of output feature maps for (const auto h : range(0, ydims[1])) { // image width for (const auto w : range(0, ydims[2])) { // image height for (const auto p : range(0, filter_h)) { // filter height for (const auto q : range(0, filter_w)) { // filter width for (const auto c : range(0, in_channel)) { // number of filters const auto yoffset = ((i * ydims[1] + h) * ydims[2] + w) * ydims[3] + m; const auto xoffset = i * xdims[1] * xdims[2] * xdims[3] + (h + p) * xdims[2] * xdims[3] + (w + q) * xdims[3] + c; const auto woffset = p * wdims[1] * wdims[2] * wdims[3] + q * wdims[2] * wdims[3] + c * wdims[3] + m; Y[yoffset] += X[xoffset] * W[woffset]; } } } } } } } } // Recified linear unit 4d static void relu4(float *X, const int xdims[4]) { for (const auto i : range(0, xdims[0] * xdims[1] * xdims[2] * xdims[3])) { X[i] = (X[i] < 0) ? 0 : X[i]; } } // Recified linear unit 2d static void relu2(float *X, const int xdims[2]) { for (const auto i : range(0, xdims[0] * xdims[1])) { X[i] = (X[i] < 0) ? 0 : X[i]; } } // From book chapter Figure 16.5 static void average_pool(const float *X, const int xdims[4], const int pool_size, float *Y, const int ydims[4]) { for (const auto i : range(0, ydims[0])) { for (const auto m : range(0, ydims[3])) { for (const auto w : range(0, ydims[2])) { for (const auto h : range(0, ydims[1])) { for (const auto p : range(0, pool_size)) { for (const auto q : range(0, pool_size)) { const auto yoffset = ((i * ydims[1] + h) * ydims[2] + w) * ydims[3] + m; const auto xoffset = i * xdims[1] * xdims[2] * xdims[3] + (pool_size * h + p) * xdims[2] * xdims[3] + (pool_size * w + q) * xdims[3] + m; Y[yoffset] += X[xoffset] / (1.0f * pool_size * pool_size); } } } } } } } static void fully_forward(const float *X, const int xdims[2], float *W, const int wdims[2], float *Y, const int ydims[2]) { for (const auto i : range(0, xdims[0])) { for (const auto j : range(0, wdims[1])) { float sum = 0; for (const auto k : range(0, xdims[1])) { sum += X[i * xdims[1] + k] * W[k * wdims[1] + j]; } Y[i * wdims[1] + j] = sum; } } } // Choose the guess with largest score static void argmax(const float *X, const int xdims[2], int *Y) { for (const auto i : range(0, xdims[0])) { auto max_idx = 0; auto max = X[i * xdims[1]]; for (const auto j : range(0, xdims[1])) { const auto elem = X[(i * xdims[1]) + j]; if (elem > max) { max_idx = j; max = elem; } } Y[i] = max_idx; } } // Forward operation for the CNN, a combination of conv layer + average pooling // + relu void forward_operation(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out) { // conv layer const int adims[] = {xdims[0], (xdims[1] - conv1dims[0] + 1), (xdims[2] - conv1dims[1] + 1), conv1dims[3]}; auto a = zeros<float>(adims); conv_forward_valid(x, xdims, conv1, conv1dims, a, adims); // int i,j,k,l; /// relu layer relu4(a, adims); // average pooling const int pool_size = 2; const int bdims[] = {adims[0], adims[1] / pool_size, adims[2] / pool_size, adims[3]}; auto b = zeros<float>(bdims); average_pool(a, adims, pool_size, b, bdims); // // for (i=0;i<10;i++){ // for 
(j=0;j<24;j++){ // for (k=0;k<24;k++){ // for (l=0;l<32;l++) // printf("%.4f ",a[i*32*24*24+j*32*24+k*32+l]); // printf("\n"); // } // printf("\n"); // } // printf("\n\n"); // } // conv layer const int cdims[] = {bdims[0], (bdims[1] - conv2dims[0] + 1), (bdims[2] - conv2dims[1] + 1), conv2dims[3]}; auto c = zeros<float>(cdims); conv_forward_valid(b, bdims, conv2, conv2dims, c, cdims); // relu relu4(c, cdims); // average pooling const int ddims[] = {cdims[0], cdims[1] / pool_size, cdims[2] / pool_size, cdims[3]}; auto d = zeros<float>(ddims); average_pool(c, cdims, pool_size, d, ddims); // reshape const int ddims2[] = {ddims[0], ddims[1] * ddims[2] * ddims[3]}; // matrix multiplication const int edims[] = {ddims[0], fc1dims[1]}; auto e = zeros<float>(edims); fully_forward(d, ddims2, fc1, fc1dims, e, edims); // relu relu2(e, edims); // matrix multiplication const int fdims[] = {edims[0], fc2dims[1]}; auto f = zeros<float>(fdims); fully_forward(e, edims, fc2, fc2dims, f, fdims); // for (i=0;i<10;i++){ // for (j=0;j<10;j++){ // printf("%.4f ",f[i*10+j]); // } // printf("\n"); // } argmax(f, fdims, out); delete[] a; delete[] b; delete[] c; delete[] d; delete[] e; delete[] f; } // Forward operation for the CNN, a combination of conv layer + average pooling // + relu void forward_operation_gpu(float *x, float *conv1, float *conv2, float *fc1, float *fc2, int *out) { // conv layer float *conv1_input; float *conv1_output; float *conv2_input; float *conv2_output; float *W1; float *W2; float *NN_L1_input; float *NN_L2_input; float *NN_output_gpu; float *NN_L1_weights; float *NN_L2_weights; int argdim[2]={xdims[0],fc2dims[1]}; float *argmax_input=zeros<float>(argdim); int x1dim[4]={xdims[0],xdims[1],xdims[2],xdims[3]}; int y1dim[4]={xdims[0],xdims[1]-conv1dims[0]+1,xdims[2]-conv1dims[1]+1,conv1dims[3]}; int x2dim[4]={xdims[0],y1dim[1]/2,y1dim[2]/2,y1dim[3]}; int y2dim[4]={xdims[0],x2dim[1]-conv2dims[0]+1,x2dim[2]-conv2dims[1]+1,conv2dims[3]}; int NN_1_dim[4]={xdims[0],y2dim[1]/2,y2dim[2]/2,y2dim[3]}; int NN_2_dim[2]={xdims[0],fc1dims[1]}; // allocate global memory check_success(cudaMalloc(&conv1_input,sizeof(float)*x1dim[0]*x1dim[1]*x1dim[2]*x1dim[3])); check_success(cudaMalloc(&conv1_output,sizeof(float)*y1dim[0]*y1dim[1]*y1dim[2]*y1dim[3])); check_success(cudaMalloc(&conv2_input,sizeof(float)*x2dim[0]*x2dim[1]*x2dim[2]*x2dim[3])); check_success(cudaMalloc(&conv2_output,sizeof(float)*y2dim[0]*y2dim[1]*y2dim[2]*y2dim[3])); check_success(cudaMalloc(&W1,sizeof(float)*conv1dims[0]*conv1dims[1]*conv1dims[2]*conv1dims[3])); check_success(cudaMalloc(&W2,sizeof(float)*conv2dims[0]*conv2dims[1]*conv2dims[2]*conv2dims[3])); check_success(cudaMalloc(&NN_L1_input,sizeof(float)*NN_1_dim[0]*NN_1_dim[1]*NN_1_dim[2]*NN_1_dim[3])); check_success(cudaMalloc(&NN_L2_input,sizeof(float)*NN_2_dim[0]*NN_2_dim[1])); check_success(cudaMalloc(&NN_L1_weights,sizeof(float)*fc1dims[0]*fc1dims[1])); check_success(cudaMalloc(&NN_L2_weights,sizeof(float)*fc2dims[0]*fc2dims[1])); check_success(cudaMalloc(&NN_output_gpu,sizeof(float)*argdim[0]*argdim[1])); check_success(cudaMemcpy(conv1_input,x,sizeof(float)*x1dim[0]*x1dim[1]*x1dim[2]*x1dim[3],cudaMemcpyHostToDevice)); check_success(cudaMemcpy(W2,conv2,sizeof(float)*conv2dims[0]*conv2dims[1]*conv2dims[2]*conv2dims[3],cudaMemcpyHostToDevice)); check_success(cudaMemcpy(W1,conv1,sizeof(float)*conv1dims[0]*conv1dims[1]*conv1dims[2]*conv1dims[3],cudaMemcpyHostToDevice)); check_success(cudaMemcpy(NN_L1_weights,fc1,sizeof(float)*fc1dims[0]*fc1dims[1],cudaMemcpyHostToDevice)); 
check_success(cudaMemcpy(NN_L2_weights,fc2,sizeof(float)*fc2dims[0]*fc2dims[1],cudaMemcpyHostToDevice)); int Y=y1dim[1]*y1dim[2]/CONV_TILE; if ((y1dim[1]*y1dim[2])%CONV_TILE) Y++; int X=y1dim[3]/CONV_TILE; if (y1dim[3]%CONV_TILE) X++; int W_grid=0; dim3 conv1_block (CONV_TILE, CONV_TILE,1); dim3 conv1_grid (xdims[0],X,Y); convLayerForwardMatrixKernel<<<conv1_grid,conv1_block>>>(conv1_input,W1,conv1_output,x1dim[1],y1dim[1],x1dim[3],y1dim[3]); // average pool W_grid = x2dim[1] / TILE_WIDTH; int H_grid = x2dim[2] / TILE_WIDTH; if (y1dim[1]%TILE_WIDTH){ W_grid++; H_grid++; } Y = H_grid * W_grid; dim3 avg_pool_1_block (TILE_WIDTH, TILE_WIDTH,1); dim3 avg_pool_1_grid (xdims[0], x2dim[3], Y); averagePool<<<avg_pool_1_grid,avg_pool_1_block>>>(conv1_output,conv2_input,W_grid,y1dim[1],x2dim[1],y1dim[3]); // second conv Y=y2dim[1]*y2dim[2]/CONV_TILE; if ((y2dim[1]*y2dim[2])%CONV_TILE) Y++; X=y2dim[3]/CONV_TILE; if (y2dim[3]%CONV_TILE) X++; dim3 conv2_block (CONV_TILE, CONV_TILE,1); dim3 conv2_grid (xdims[0],X,Y); convLayerForwardMatrixKernel<<<conv2_grid,conv2_block>>>(conv2_input,W2,conv2_output,x2dim[1],y2dim[1],x2dim[3],y2dim[3]); // average pooling W_grid = NN_1_dim[1] / TILE_WIDTH; H_grid = NN_1_dim[2] / TILE_WIDTH; if (y1dim[1]%TILE_WIDTH){ W_grid++; H_grid++; } Y = H_grid * W_grid; dim3 avg_pool_2_block (TILE_WIDTH, TILE_WIDTH,1); dim3 avg_pool_2_grid (xdims[0], NN_1_dim[3], Y); averagePool<<<avg_pool_2_grid,avg_pool_2_block>>>(conv2_output,NN_L1_input,W_grid,y2dim[1],NN_1_dim[1],y2dim[3]); dim3 fully_forward_1_grid(xdims[0],1,1); dim3 fully_forward_1_block(fc1dims[0],1,1); gpu_fully_forward<<<fully_forward_1_grid,fully_forward_1_block>>>(NN_L1_input,NN_L1_weights,NN_L2_input,fc1dims[1],fc1dims[0]); dim3 fully_forward_2_grid(xdims[0],1,1); dim3 fully_forward_2_block(fc2dims[0],1,1); gpu_fully_forward<<<fully_forward_2_grid,fully_forward_2_block>>>(NN_L2_input,NN_L2_weights,NN_output_gpu,fc2dims[1],fc2dims[0]); check_success(cudaMemcpy(argmax_input,NN_output_gpu,sizeof(float)*xdims[0]*fc2dims[1],cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); check_success(cudaFree(conv1_input)); check_success(cudaFree(conv1_output)); check_success(cudaFree(conv2_input)); check_success(cudaFree(conv2_output)); check_success(cudaFree(W1)); check_success(cudaFree(W2)); check_success(cudaFree(NN_L1_input)); check_success(cudaFree(NN_L2_input)); check_success(cudaFree(NN_L1_weights)); check_success(cudaFree(NN_L2_weights)); check_success(cudaFree(NN_output_gpu)); const int fdims[] = {xdims[0], fc2dims[1]}; argmax(argmax_input, fdims, out); free(argmax_input); } int main(int argc, char **argv) { if (argc != 3 && argc != 4) { std::cerr << "\n" << "This program performs the forward opertion step for " "Convolutional Neural Network(CNN). 
" "Sample usage: \n" << argv[0] << " [../data/test10.hdf5] [../data/model.hdf5] [10]\n"; return -1; } FLAGS_testdata = std::string(argv[1]); FLAGS_model = std::string(argv[2]); if (argc == 3) { const std::map<std::string, int> default_batch_sizes{ {"../data/test2.hdf5", 2}, {"../data/test10.hdf5", 10}, {"../data/test100.hdf5", 100}, {"../data/testfull.hdf5", 10000}}; const auto batch_size_in_map = default_batch_sizes.find(FLAGS_testdata); if (batch_size_in_map == default_batch_sizes.end()) { std::cerr << "\nERROR:: Unrecognized file " << FLAGS_testdata << " batch_size must be specified.\n"; return -1; } FLAGS_batch_size = batch_size_in_map->second; } else if (argc == 4) { FLAGS_batch_size = atoi(argv[3]); } xdims[0] = FLAGS_batch_size; rdims[0] = FLAGS_batch_size; // Load data into x and y float *x = allocate<float>(xdims); float *y = allocate<float>(rdims); loadData(x, y); // Load model float *conv1 = allocate<float>(conv1dims); float *conv2 = allocate<float>(conv2dims); float *fc1 = allocate<float>(fc1dims); float *fc2 = allocate<float>(fc2dims); loadModel(conv1, conv2, fc1, fc2); // Perform foward opertion int *out = zeros<int>(FLAGS_batch_size); // get start time const auto start = now(); forward_operation_gpu(x, conv1, conv2, fc1, fc2, out); // get end time const auto end = now(); // get elapsed time in milliseconds const auto elapsed = std::chrono::duration<double, std::milli>(end - start).count(); // Get reference int *ref = zeros<int>(FLAGS_batch_size); argmax(y, rdims, ref); // Calculate correctness int num_correct = 0; for (const auto i : range(0, FLAGS_batch_size)) { if (out[i] == ref[i]) { num_correct++; } } std::cout << "Done with " << FLAGS_batch_size << " queries in " << "elapsed = " << elapsed << " milliseconds. Correctness: " << static_cast<float>(num_correct) / FLAGS_batch_size << "\n"; delete[] x; delete[] y; delete[] conv1; delete[] conv2; delete[] fc1; delete[] fc2; delete[] out; delete[] ref; return 0; }
74ea68caccc4b6fb350b1e5c800f0a66598852a2.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/sort/cub_sort.cuh" namespace cunumeric { void cub_local_sort(const __half* values_in, __half* values_out, const int64_t* indices_in, int64_t* indices_out, const size_t volume, const size_t sort_dim_size, hipStream_t stream) { detail::cub_local_sort( values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stream); } } // namespace cunumeric
74ea68caccc4b6fb350b1e5c800f0a66598852a2.cu
/* Copyright 2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/sort/cub_sort.cuh" namespace cunumeric { void cub_local_sort(const __half* values_in, __half* values_out, const int64_t* indices_in, int64_t* indices_out, const size_t volume, const size_t sort_dim_size, cudaStream_t stream) { detail::cub_local_sort( values_in, values_out, indices_in, indices_out, volume, sort_dim_size, stream); } } // namespace cunumeric
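This file only instantiates the __half overload of cub_local_sort; the actual segmented sort lives in detail::cub_local_sort inside cub_sort.cuh, which is not shown here. Reading the parameters, volume appears to be the total number of elements and sort_dim_size the length of the innermost (sorted) axis, so volume / sort_dim_size segments would be sorted independently. A hedged usage sketch under that reading (buffer names and sizes are made up):

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>

void example_half_sort(cudaStream_t stream) {
  const size_t sort_dim_size = 1024;                 // length of the sorted axis
  const size_t volume        = 64 * sort_dim_size;   // 64 independent segments
  __half*  values_in;   __half*  values_out;
  int64_t* indices_in;  int64_t* indices_out;
  cudaMalloc(&values_in,   volume * sizeof(__half));
  cudaMalloc(&values_out,  volume * sizeof(__half));
  cudaMalloc(&indices_in,  volume * sizeof(int64_t));
  cudaMalloc(&indices_out, volume * sizeof(int64_t));
  // ... fill values_in and indices_in on the device ...
  cunumeric::cub_local_sort(values_in, values_out, indices_in, indices_out,
                            volume, sort_dim_size, stream);
  cudaFree(values_in);  cudaFree(values_out);
  cudaFree(indices_in); cudaFree(indices_out);
}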
3c98b48bcdbb05f6cb999cc2247129ca9475d5e3.hip
// !!! This is a file automatically generated by hipify!!! #include "PlayerNormalBulletUpdater.h" #include <hip/hip_runtime.h> #include "../PlayerNormalBullet.h" #include "PlayerNormalBulletUpdaterKernel.cuh" void PlayerNormalBulletUpdater::Run() { int length = PlayerNormalBulletPtr::length; // Transfer data from the CPU to the GPU hipMemcpyAsync( PlayerNormalBulletPtr::device, PlayerNormalBulletPtr::host, sizeof(PlayerNormalBullet) * length, hipMemcpyHostToDevice, PlayerNormalBulletPtr::stream); dim3 block(256, 1, 1); dim3 grid((length + 256 - 1) / 256, 1, 1); // Run the player normal-bullet update kernel PlayerNormalBulletUpdaterKernel::Process << <grid, block, 0, PlayerNormalBulletPtr::stream >> > (PlayerNormalBulletPtr::device, length); // Transfer data back from the GPU to the CPU hipMemcpyAsync( PlayerNormalBulletPtr::host, PlayerNormalBulletPtr::device, sizeof(PlayerNormalBullet) * length, hipMemcpyDeviceToHost, PlayerNormalBulletPtr::stream); }
3c98b48bcdbb05f6cb999cc2247129ca9475d5e3.cu
#include "PlayerNormalBulletUpdater.h" #include <cuda_runtime.h> #include "../PlayerNormalBullet.h" #include "PlayerNormalBulletUpdaterKernel.cuh" void PlayerNormalBulletUpdater::Run() { int length = PlayerNormalBulletPtr::length; // Transfer data from the CPU to the GPU cudaMemcpyAsync( PlayerNormalBulletPtr::device, PlayerNormalBulletPtr::host, sizeof(PlayerNormalBullet) * length, cudaMemcpyHostToDevice, PlayerNormalBulletPtr::stream); dim3 block(256, 1, 1); dim3 grid((length + 256 - 1) / 256, 1, 1); // Run the player normal-bullet update kernel PlayerNormalBulletUpdaterKernel::Process << <grid, block, 0, PlayerNormalBulletPtr::stream >> > (PlayerNormalBulletPtr::device, length); // Transfer data back from the GPU to the CPU cudaMemcpyAsync( PlayerNormalBulletPtr::host, PlayerNormalBulletPtr::device, sizeof(PlayerNormalBullet) * length, cudaMemcpyDeviceToHost, PlayerNormalBulletPtr::stream); }
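The grid size (length + 256 - 1) / 256 used above is integer ceiling division: it rounds up so that every bullet gets a thread even when length is not a multiple of the 256-thread block. A tiny illustrative helper (div_up is not part of this project):

// Integer ceiling division, the same pattern as (length + 256 - 1) / 256.
constexpr int div_up(int n, int block) { return (n + block - 1) / block; }
// Example: div_up(1000, 256) == 4, so 4 * 256 = 1024 threads are launched and
// the Process kernel presumably bounds-checks its index against `length`.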
81309a11c3ea0a7e18ee6bf5ce938ed4804993c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include <time.h> #include <math.h> #define CUDA 0 #define OPENMP 1 #define SPHERES 20 #define rnd( x ) (x * rand() / RAND_MAX) #define INF 2e10f #define DIM 2048 struct Sphere { float r,b,g; float radius; float x,y,z; }; __device__ float hit( float x, float y, float z, float ox, float oy, float *n, float radius ) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } __global__ void kernel(struct Sphere* s, unsigned char* ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y*DIM; float ox = (x - DIM/2); float oy = (y - DIM/2); float r=0, g=0, b=0; float maxz = -INF; for(int i=0; i<SPHERES; i++) { float n; float t = hit( s[i].x, s[i].y, s[i].z, ox, oy, &n, s[i].radius ); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset*4 + 0] = (int)(r * 255); ptr[offset*4 + 1] = (int)(g * 255); ptr[offset*4 + 2] = (int)(b * 255); ptr[offset*4 + 3] = 255; } void ppm_write(unsigned char* bitmap, int xdim,int ydim, FILE* fp) { int i,x,y; fprintf(fp,"P3\n"); fprintf(fp,"%d %d\n",xdim, ydim); fprintf(fp,"255\n"); for (y=0;y<ydim;y++) { for (x=0;x<xdim;x++) { i=x+y*xdim; fprintf(fp,"%d %d %d ",bitmap[4*i],bitmap[4*i+1],bitmap[4*i+2]); } fprintf(fp,"\n"); } } int main(int argc, char* argv[]) { double exe_time; clock_t start_time, end_time; struct Sphere *temp_s; unsigned char* bitmap; struct Sphere *d_temp_s; unsigned char* d_bitmap; dim3 blocks(DIM,DIM,1); // Error detection code if (argc!=2) { printf("> a.out [filename.ppm]\n"); printf("for example, '> a.out result.ppm' means executing CUDA\n"); exit(0); } // Start Timer srand(time(NULL)); start_time = clock(); // Allocate the memory on host bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4); temp_s = (struct Sphere*)malloc(sizeof(struct Sphere) * SPHERES); // Allocate the memory on device hipMalloc( (void**)&d_temp_s, sizeof(struct Sphere) * SPHERES ); hipMalloc( (void**)&d_bitmap, sizeof(unsigned char)*DIM*DIM*4 ); // Generate the spheres for (int i=0; i<SPHERES; i++) { temp_s[i].r = rnd( 1.0f ); temp_s[i].g = rnd( 1.0f ); temp_s[i].b = rnd( 1.0f ); temp_s[i].x = rnd( 2000.0f ) - 1000; temp_s[i].y = rnd( 2000.0f ) - 1000; temp_s[i].z = rnd( 2000.0f ) - 1000; temp_s[i].radius = rnd( 200.0f ) + 40; } // Move data to device hipMemcpy ( d_temp_s, temp_s, sizeof(struct Sphere) * SPHERES, hipMemcpyHostToDevice ); // Calculate the ray hipLaunchKernelGGL(( kernel), dim3(blocks), dim3(1), 0, 0, d_temp_s, d_bitmap); hipDeviceSynchronize(); hipMemcpy ( bitmap, d_bitmap, sizeof(unsigned char)*DIM*DIM*4, hipMemcpyDeviceToHost ); // open the file FILE* fp = fopen(argv[1],"w"); ppm_write(bitmap,DIM,DIM,fp); // Write the image // Stop Timer end_time = clock(); exe_time = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; // Print the result printf("CUDA ray tracing: %f sec\n", exe_time); printf("[%s] was generated\n", argv[1]); // Close the file and free the memory fclose(fp); free(bitmap); free(temp_s); hipFree(d_bitmap); hipFree(d_temp_s); return 0; }
81309a11c3ea0a7e18ee6bf5ce938ed4804993c9.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <time.h> #include <math.h> #define CUDA 0 #define OPENMP 1 #define SPHERES 20 #define rnd( x ) (x * rand() / RAND_MAX) #define INF 2e10f #define DIM 2048 struct Sphere { float r,b,g; float radius; float x,y,z; }; __device__ float hit( float x, float y, float z, float ox, float oy, float *n, float radius ) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius*radius) { float dz = sqrtf( radius*radius - dx*dx - dy*dy ); *n = dz / sqrtf( radius * radius ); return dz + z; } return -INF; } __global__ void kernel(struct Sphere* s, unsigned char* ptr) { int x = blockIdx.x; int y = blockIdx.y; int offset = x + y*DIM; float ox = (x - DIM/2); float oy = (y - DIM/2); float r=0, g=0, b=0; float maxz = -INF; for(int i=0; i<SPHERES; i++) { float n; float t = hit( s[i].x, s[i].y, s[i].z, ox, oy, &n, s[i].radius ); if (t > maxz) { float fscale = n; r = s[i].r * fscale; g = s[i].g * fscale; b = s[i].b * fscale; maxz = t; } } ptr[offset*4 + 0] = (int)(r * 255); ptr[offset*4 + 1] = (int)(g * 255); ptr[offset*4 + 2] = (int)(b * 255); ptr[offset*4 + 3] = 255; } void ppm_write(unsigned char* bitmap, int xdim,int ydim, FILE* fp) { int i,x,y; fprintf(fp,"P3\n"); fprintf(fp,"%d %d\n",xdim, ydim); fprintf(fp,"255\n"); for (y=0;y<ydim;y++) { for (x=0;x<xdim;x++) { i=x+y*xdim; fprintf(fp,"%d %d %d ",bitmap[4*i],bitmap[4*i+1],bitmap[4*i+2]); } fprintf(fp,"\n"); } } int main(int argc, char* argv[]) { double exe_time; clock_t start_time, end_time; struct Sphere *temp_s; unsigned char* bitmap; struct Sphere *d_temp_s; unsigned char* d_bitmap; dim3 blocks(DIM,DIM,1); // Error detection code if (argc!=2) { printf("> a.out [filename.ppm]\n"); printf("for example, '> a.out result.ppm' means executing CUDA\n"); exit(0); } // Start Timer srand(time(NULL)); start_time = clock(); // Allocate the memory on host bitmap = (unsigned char*)malloc(sizeof(unsigned char)*DIM*DIM*4); temp_s = (struct Sphere*)malloc(sizeof(struct Sphere) * SPHERES); // Allocate the memory on device cudaMalloc( (void**)&d_temp_s, sizeof(struct Sphere) * SPHERES ); cudaMalloc( (void**)&d_bitmap, sizeof(unsigned char)*DIM*DIM*4 ); // Generate the spheres for (int i=0; i<SPHERES; i++) { temp_s[i].r = rnd( 1.0f ); temp_s[i].g = rnd( 1.0f ); temp_s[i].b = rnd( 1.0f ); temp_s[i].x = rnd( 2000.0f ) - 1000; temp_s[i].y = rnd( 2000.0f ) - 1000; temp_s[i].z = rnd( 2000.0f ) - 1000; temp_s[i].radius = rnd( 200.0f ) + 40; } // Move data to device cudaMemcpy ( d_temp_s, temp_s, sizeof(struct Sphere) * SPHERES, cudaMemcpyHostToDevice ); // Calculate the ray kernel<<<blocks, 1>>>(d_temp_s, d_bitmap); cudaDeviceSynchronize(); cudaMemcpy ( bitmap, d_bitmap, sizeof(unsigned char)*DIM*DIM*4, cudaMemcpyDeviceToHost ); // open the file FILE* fp = fopen(argv[1],"w"); ppm_write(bitmap,DIM,DIM,fp); // Write the image // Stop Timer end_time = clock(); exe_time = ((double)(end_time - start_time)) / CLOCKS_PER_SEC; // Print the result printf("CUDA ray tracing: %f sec\n", exe_time); printf("[%s] was generated\n", argv[1]); // Close the file and free the memory fclose(fp); free(bitmap); free(temp_s); cudaFree(d_bitmap); cudaFree(d_temp_s); return 0; }
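The hit() routine above works because every pixel casts an orthographic ray parallel to the z-axis; the note below spells out the geometry it relies on.

/* Geometry behind hit(): with dx = ox - x and dy = oy - y, a ray through
   (ox, oy) parallel to the z-axis meets a sphere of radius R centred at
   (x, y, z) iff dx*dx + dy*dy < R*R. The surface point facing the camera
   then lies at depth
       z + dz,   where dz = sqrtf(R*R - dx*dx - dy*dy),
   which is what hit() returns (or -INF on a miss). The normalised height
   n = dz / R lies in (0, 1] and becomes the shading factor fscale, so
   pixels near a sphere's silhouette come out darker than its centre. */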
02c4a1d0490551bc8becc904443daecf9d70bbb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernels_hip.cuh" #include "help_functions.cuh" #include "window.cuh" namespace hlp = help_functions; __global__ void makeMetaSpheres(const Boid* boids, float* canvas) { __shared__ uint boid_id; __shared__ int start_x; __shared__ int start_y; if(threadIdx.x == 0 && threadIdx.y == 0) { boid_id = blockIdx.x; start_x = boids[boid_id].pos.x - data::HALF_IMPACT; start_y = boids[boid_id].pos.y - data::HALF_IMPACT; } __syncthreads(); int offset_y = threadIdx.y; while(offset_y < data::IMPACT_WIDTH) { int offset_x = threadIdx.x; while(offset_x < data::IMPACT_WIDTH) { int x = start_x + offset_x; int y = start_y + offset_y; if(0 <= x && x < data::WIDTH && 0 <= y && y < data::HEIGHT) { float to_add = data::META_RADIUS * hlp::fisqrt((x - boids[boid_id].pos.x) * (x - boids[boid_id].pos.x) + (y - boids[boid_id].pos.y) * (y - boids[boid_id].pos.y)); //float* cell = ; //if(*cell + to_add <= 0.5f) atomicAdd(&canvas[x + y * data::WIDTH], to_add); //atomicAdd(&canvas[x + y * data::WIDTH], 0.5f); } __syncthreads(); offset_x += data::IMPACT_THREAD_WIDTH; } offset_y += data::IMPACT_THREAD_WIDTH; } } __global__ void makeMetaSpheresFull(const Boid* boids, float* canvas) { __shared__ uint boid_id; if(threadIdx.x == 0 && threadIdx.y == 0) { boid_id = blockIdx.x; } __syncthreads(); int y = threadIdx.y; while(y < data::HEIGHT) { int x = threadIdx.x; while(x < data::WIDTH) { float to_add = data::META_RADIUS * hlp::fisqrt((x - boids[boid_id].pos.x) * (x - boids[boid_id].pos.x) + (y - boids[boid_id].pos.y) * (y - boids[boid_id].pos.y)); atomicAdd(&canvas[x + y * data::WIDTH], to_add); __syncthreads(); x += data::IMPACT_THREAD_WIDTH; } y += data::IMPACT_THREAD_WIDTH; } } using namespace data; __global__ void initBoids(Boid* boids, const int boidsCount) { th_id i = threadIdx.x + blockIdx.x * blockDim.x; if(i < boidsCount) { const float step = 360.0f / boidsCount; const float ring_radius = WIDTH / 4; Vec pos = Vec(WIDTH / 2 + ring_radius * cos(i * RAD * step), HEIGHT / 2 + ring_radius * sin(i * RAD * step)); Vec vel = Vec(WIDTH / 2 - pos.x, HEIGHT / 2 - pos.y); vel.Limit(0.5f + 2.0f * i / boidsCount); boids[i] = Boid(pos, vel); } } template <class T> __global__ void clearArray<T>(T* arr) { th_id x = threadIdx.x + blockIdx.x * blockDim.x; th_id y = threadIdx.y + blockIdx.y * blockDim.y; th_id offset = x + y * blockDim.x * gridDim.x; arr[offset] = 0.0f; } __host__ void dummyKernelsTemplate() { hipLaunchKernelGGL(( clearArray<float>), dim3(0), dim3(0), 0, 0, NULL); } __device__ void setPixel(uchar* ptr, int offset, const uchar value) { offset *= 3; ptr[offset + 0] = value; ptr[offset + 1] = value; ptr[offset + 2] = value; } __global__ void floatToColor(const float* values, uchar* colors) { th_id x = threadIdx.x + blockIdx.x * blockDim.x; th_id y = threadIdx.y + blockIdx.y * blockDim.y; th_id offset = x + y * blockDim.x * gridDim.x; if(offset < GRID_SIZE) { uchar value; if(values[offset] < 0.5f) setPixel(colors, offset, 0); else setPixel(colors, offset, 255); //setPixel(colors, offset, hlp::min<uchar>(100, values[offset] * 255)); //setPixel(colors, offset, 100); } } __global__ void moveBoids(Boid* boids, const int boidsCount) { th_id i = threadIdx.x + blockIdx.x * blockDim.x; if(i < boidsCount) { boids[i].UpdatePosition(data::WIDTH, data::HEIGHT); } }
02c4a1d0490551bc8becc904443daecf9d70bbb1.cu
#include "kernels.cuh" #include "help_functions.cuh" #include "window.cuh" namespace hlp = help_functions; __global__ void makeMetaSpheres(const Boid* boids, float* canvas) { __shared__ uint boid_id; __shared__ int start_x; __shared__ int start_y; if(threadIdx.x == 0 && threadIdx.y == 0) { boid_id = blockIdx.x; start_x = boids[boid_id].pos.x - data::HALF_IMPACT; start_y = boids[boid_id].pos.y - data::HALF_IMPACT; } __syncthreads(); int offset_y = threadIdx.y; while(offset_y < data::IMPACT_WIDTH) { int offset_x = threadIdx.x; while(offset_x < data::IMPACT_WIDTH) { int x = start_x + offset_x; int y = start_y + offset_y; if(0 <= x && x < data::WIDTH && 0 <= y && y < data::HEIGHT) { float to_add = data::META_RADIUS * hlp::fisqrt((x - boids[boid_id].pos.x) * (x - boids[boid_id].pos.x) + (y - boids[boid_id].pos.y) * (y - boids[boid_id].pos.y)); //float* cell = ; //if(*cell + to_add <= 0.5f) atomicAdd(&canvas[x + y * data::WIDTH], to_add); //atomicAdd(&canvas[x + y * data::WIDTH], 0.5f); } __syncthreads(); offset_x += data::IMPACT_THREAD_WIDTH; } offset_y += data::IMPACT_THREAD_WIDTH; } } __global__ void makeMetaSpheresFull(const Boid* boids, float* canvas) { __shared__ uint boid_id; if(threadIdx.x == 0 && threadIdx.y == 0) { boid_id = blockIdx.x; } __syncthreads(); int y = threadIdx.y; while(y < data::HEIGHT) { int x = threadIdx.x; while(x < data::WIDTH) { float to_add = data::META_RADIUS * hlp::fisqrt((x - boids[boid_id].pos.x) * (x - boids[boid_id].pos.x) + (y - boids[boid_id].pos.y) * (y - boids[boid_id].pos.y)); atomicAdd(&canvas[x + y * data::WIDTH], to_add); __syncthreads(); x += data::IMPACT_THREAD_WIDTH; } y += data::IMPACT_THREAD_WIDTH; } } using namespace data; __global__ void initBoids(Boid* boids, const int boidsCount) { th_id i = threadIdx.x + blockIdx.x * blockDim.x; if(i < boidsCount) { const float step = 360.0f / boidsCount; const float ring_radius = WIDTH / 4; Vec pos = Vec(WIDTH / 2 + ring_radius * cos(i * RAD * step), HEIGHT / 2 + ring_radius * sin(i * RAD * step)); Vec vel = Vec(WIDTH / 2 - pos.x, HEIGHT / 2 - pos.y); vel.Limit(0.5f + 2.0f * i / boidsCount); boids[i] = Boid(pos, vel); } } template <class T> __global__ void clearArray<T>(T* arr) { th_id x = threadIdx.x + blockIdx.x * blockDim.x; th_id y = threadIdx.y + blockIdx.y * blockDim.y; th_id offset = x + y * blockDim.x * gridDim.x; arr[offset] = 0.0f; } __host__ void dummyKernelsTemplate() { clearArray<float><<<0, 0>>>(NULL); } __device__ void setPixel(uchar* ptr, int offset, const uchar value) { offset *= 3; ptr[offset + 0] = value; ptr[offset + 1] = value; ptr[offset + 2] = value; } __global__ void floatToColor(const float* values, uchar* colors) { th_id x = threadIdx.x + blockIdx.x * blockDim.x; th_id y = threadIdx.y + blockIdx.y * blockDim.y; th_id offset = x + y * blockDim.x * gridDim.x; if(offset < GRID_SIZE) { uchar value; if(values[offset] < 0.5f) setPixel(colors, offset, 0); else setPixel(colors, offset, 255); //setPixel(colors, offset, hlp::min<uchar>(100, values[offset] * 255)); //setPixel(colors, offset, 100); } } __global__ void moveBoids(Boid* boids, const int boidsCount) { th_id i = threadIdx.x + blockIdx.x * blockDim.x; if(i < boidsCount) { boids[i].UpdatePosition(data::WIDTH, data::HEIGHT); } }
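makeMetaSpheres and makeMetaSpheresFull above accumulate META_RADIUS * hlp::fisqrt(d*d), i.e. META_RADIUS / d, for every boid, and floatToColor then thresholds the field at 0.5, which is a standard metaball rendering. hlp::fisqrt comes from help_functions.cuh, which is not included in this listing; assuming it is the classic fast inverse square root, a device-side sketch would look roughly like this (on current GPUs the built-in rsqrtf(x) is the more idiomatic choice):

// Hedged sketch only: what a Quake-style fast inverse square root looks like.
// The real hlp::fisqrt in help_functions.cuh may differ.
__device__ inline float fisqrt_sketch(float x) {
  float xhalf = 0.5f * x;
  int i = __float_as_int(x);          // reinterpret the float's bit pattern
  i = 0x5f3759df - (i >> 1);          // magic-constant initial guess
  float y = __int_as_float(i);
  return y * (1.5f - xhalf * y * y);  // one Newton-Raphson step, ~1/sqrt(x)
}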
67e296769a9df166b01bd8b6dc332a4c3bb0127d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform_reduce.h> #include <type_traits> namespace cudf { namespace detail { namespace { /** * @brief Basic element for the minmax reduce operation. * * Stores the minimum and maximum values that have been encountered so far */ template <typename T> struct minmax_pair { T min_val; T max_val; __host__ __device__ minmax_pair() : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){}; __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){}; __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){}; }; /** * @brief Reduce for the minmax operation and return a device scalar. * * @tparam Op Binary operator functor * @tparam InputIterator Input iterator Type * @param d_in input iterator * @param num_items number of items to reduce * @param binary_op binary operator used to reduce * @param mr Device resource used for result allocation * @param stream CUDA stream to run kernels on. * @return rmm::device_scalar<OutputType> */ template <typename T, typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type> rmm::device_scalar<OutputType> reduce_device(InputIterator d_in, cudf::size_type num_items, Op binary_op, rmm::cuda_stream_view stream) { OutputType identity{}; rmm::device_scalar<OutputType> result{identity, stream}; // Allocate temporary storage size_t storage_bytes = 0; hipcub::DeviceReduce::Reduce( nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); auto temp_storage = rmm::device_buffer{storage_bytes, stream}; // Run reduction hipcub::DeviceReduce::Reduce(temp_storage.data(), storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); return result; } /** * @brief Functor that accepts two minmax_pairs and returns a * minmax_pair whose minimum and maximum values are the min() and max() * respectively of the minimums and maximums of the input pairs. 
*/ template <typename T> struct minmax_binary_op : public thrust::binary_function<minmax_pair<T>, minmax_pair<T>, minmax_pair<T>> { __device__ minmax_pair<T> operator()(minmax_pair<T> const& lhs, minmax_pair<T> const& rhs) const { return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val), thrust::max(lhs.max_val, rhs.max_val)}; } }; /** * @brief Creates a minmax_pair<T> from a T */ template <typename T> struct create_minmax { __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; } }; /** * @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair * that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(), * cudf::DeviceMax::identity<T>()> */ template <typename T> struct create_minmax_with_nulls { __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i) { return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{}; } }; /** * @brief Dispatch functor for minmax operation. * * This uses the reduce function to compute the min and max values * simultaneously for a column of data. * * @tparam T The input column's type */ struct minmax_functor { template <typename T> static constexpr bool is_supported() { return !(cudf::is_fixed_point<T>() || std::is_same<T, cudf::list_view>::value || std::is_same<T, cudf::struct_view>::value); } template <typename T> auto reduce(column_view const& col, rmm::cuda_stream_view stream) { auto device_col = column_device_view::create(col, stream); // compute minimum and maximum values if (col.has_nulls()) { auto pair_to_minmax = thrust::make_transform_iterator( make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{}); return reduce_device<T>(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } else { auto col_to_minmax = thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{}); return reduce_device<T>(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } } /** * @brief Functor to copy a minmax_pair result to individual scalar instances. */ template <typename T, typename ResultType = minmax_pair<T>> struct assign_min_max { __device__ void operator()() { *min_data = result->min_val; *max_data = result->max_val; } ResultType* result; T* min_data; T* max_data; }; template <typename T, std::enable_if_t<is_supported<T>() and !std::is_same<T, cudf::string_view>::value and !cudf::is_dictionary<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // create output scalars using ScalarType = cudf::scalar_type_t<T>; auto minimum = new ScalarType(T{}, true, stream, mr); auto maximum = new ScalarType(T{}, true, stream, mr); // copy dev_result to the output scalars device_single_thread(assign_min_max<T>{dev_result.data(), minimum->data(), maximum->data()}, stream); return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)}; } /** * @brief Specialization for strings column. 
*/ template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<cudf::string_view>(col, stream); // copy the minmax_pair to the host; does not copy the strings using OutputType = minmax_pair<cudf::string_view>; OutputType host_result; CUDA_TRY(hipMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value())); // strings are copied to create the scalars here return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr), std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)}; } /** * @brief Specialization for dictionary column. */ template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // copy the minmax_pair to the host to call get_element using OutputType = minmax_pair<T>; OutputType host_result; CUDA_TRY(hipMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), hipMemcpyDeviceToHost, stream.value())); // get the keys for those indexes auto const keys = dictionary_column_view(col).keys(); return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr), get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)}; } template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("type not supported for minmax() operation"); } }; } // namespace std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (col.null_count() == col.size()) { // this handles empty and all-null columns // return scalars with valid==false return {make_default_constructed_scalar(col.type(), stream, mr), make_default_constructed_scalar(col.type(), stream, mr)}; } return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr); } } // namespace detail /** * @copydoc cudf::minmax */ std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( const column_view& col, rmm::mr::device_memory_resource* mr) { return detail::minmax(col, rmm::cuda_stream_default, mr); } } // namespace cudf
67e296769a9df166b01bd8b6dc332a4c3bb0127d.cu
/* * Copyright (c) 2020-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_view.hpp> #include <cudf/detail/copy.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/dictionary/dictionary_column_view.hpp> #include <cudf/reduction.hpp> #include <cudf/scalar/scalar_factories.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/transform_reduce.h> #include <type_traits> namespace cudf { namespace detail { namespace { /** * @brief Basic element for the minmax reduce operation. * * Stores the minimum and maximum values that have been encountered so far */ template <typename T> struct minmax_pair { T min_val; T max_val; __host__ __device__ minmax_pair() : min_val(cudf::DeviceMin::identity<T>()), max_val(cudf::DeviceMax::identity<T>()){}; __host__ __device__ minmax_pair(T val) : min_val(val), max_val(val){}; __host__ __device__ minmax_pair(T min_val_, T max_val_) : min_val(min_val_), max_val(max_val_){}; }; /** * @brief Reduce for the minmax operation and return a device scalar. * * @tparam Op Binary operator functor * @tparam InputIterator Input iterator Type * @param d_in input iterator * @param num_items number of items to reduce * @param binary_op binary operator used to reduce * @param mr Device resource used for result allocation * @param stream CUDA stream to run kernels on. * @return rmm::device_scalar<OutputType> */ template <typename T, typename Op, typename InputIterator, typename OutputType = typename thrust::iterator_value<InputIterator>::type> rmm::device_scalar<OutputType> reduce_device(InputIterator d_in, cudf::size_type num_items, Op binary_op, rmm::cuda_stream_view stream) { OutputType identity{}; rmm::device_scalar<OutputType> result{identity, stream}; // Allocate temporary storage size_t storage_bytes = 0; cub::DeviceReduce::Reduce( nullptr, storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); auto temp_storage = rmm::device_buffer{storage_bytes, stream}; // Run reduction cub::DeviceReduce::Reduce(temp_storage.data(), storage_bytes, d_in, result.data(), num_items, binary_op, identity, stream.value()); return result; } /** * @brief Functor that accepts two minmax_pairs and returns a * minmax_pair whose minimum and maximum values are the min() and max() * respectively of the minimums and maximums of the input pairs. 
*/ template <typename T> struct minmax_binary_op : public thrust::binary_function<minmax_pair<T>, minmax_pair<T>, minmax_pair<T>> { __device__ minmax_pair<T> operator()(minmax_pair<T> const& lhs, minmax_pair<T> const& rhs) const { return minmax_pair<T>{thrust::min(lhs.min_val, rhs.min_val), thrust::max(lhs.max_val, rhs.max_val)}; } }; /** * @brief Creates a minmax_pair<T> from a T */ template <typename T> struct create_minmax { __device__ minmax_pair<T> operator()(T e) { return minmax_pair<T>{e}; } }; /** * @brief Functor that takes a thrust::pair<T, bool> and produces a minmax_pair * that is <T, T> for minimum and maximum or <cudf::DeviceMin::identity<T>(), * cudf::DeviceMax::identity<T>()> */ template <typename T> struct create_minmax_with_nulls { __device__ minmax_pair<T> operator()(thrust::pair<T, bool> i) { return i.second ? minmax_pair<T>{i.first} : minmax_pair<T>{}; } }; /** * @brief Dispatch functor for minmax operation. * * This uses the reduce function to compute the min and max values * simultaneously for a column of data. * * @tparam T The input column's type */ struct minmax_functor { template <typename T> static constexpr bool is_supported() { return !(cudf::is_fixed_point<T>() || std::is_same<T, cudf::list_view>::value || std::is_same<T, cudf::struct_view>::value); } template <typename T> auto reduce(column_view const& col, rmm::cuda_stream_view stream) { auto device_col = column_device_view::create(col, stream); // compute minimum and maximum values if (col.has_nulls()) { auto pair_to_minmax = thrust::make_transform_iterator( make_pair_iterator<T, true>(*device_col), create_minmax_with_nulls<T>{}); return reduce_device<T>(pair_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } else { auto col_to_minmax = thrust::make_transform_iterator(device_col->begin<T>(), create_minmax<T>{}); return reduce_device<T>(col_to_minmax, col.size(), minmax_binary_op<T>{}, stream); } } /** * @brief Functor to copy a minmax_pair result to individual scalar instances. */ template <typename T, typename ResultType = minmax_pair<T>> struct assign_min_max { __device__ void operator()() { *min_data = result->min_val; *max_data = result->max_val; } ResultType* result; T* min_data; T* max_data; }; template <typename T, std::enable_if_t<is_supported<T>() and !std::is_same<T, cudf::string_view>::value and !cudf::is_dictionary<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // create output scalars using ScalarType = cudf::scalar_type_t<T>; auto minimum = new ScalarType(T{}, true, stream, mr); auto maximum = new ScalarType(T{}, true, stream, mr); // copy dev_result to the output scalars device_single_thread(assign_min_max<T>{dev_result.data(), minimum->data(), maximum->data()}, stream); return {std::unique_ptr<scalar>(minimum), std::unique_ptr<scalar>(maximum)}; } /** * @brief Specialization for strings column. 
*/ template <typename T, std::enable_if_t<std::is_same<T, cudf::string_view>::value>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<cudf::string_view>(col, stream); // copy the minmax_pair to the host; does not copy the strings using OutputType = minmax_pair<cudf::string_view>; OutputType host_result; CUDA_TRY(cudaMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value())); // strings are copied to create the scalars here return {std::make_unique<string_scalar>(host_result.min_val, true, stream, mr), std::make_unique<string_scalar>(host_result.max_val, true, stream, mr)}; } /** * @brief Specialization for dictionary column. */ template <typename T, std::enable_if_t<cudf::is_dictionary<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // compute minimum and maximum values auto dev_result = reduce<T>(col, stream); // copy the minmax_pair to the host to call get_element using OutputType = minmax_pair<T>; OutputType host_result; CUDA_TRY(cudaMemcpyAsync( &host_result, dev_result.data(), sizeof(OutputType), cudaMemcpyDeviceToHost, stream.value())); // get the keys for those indexes auto const keys = dictionary_column_view(col).keys(); return {get_element(keys, static_cast<size_type>(host_result.min_val), stream, mr), get_element(keys, static_cast<size_type>(host_result.max_val), stream, mr)}; } template <typename T, std::enable_if_t<!is_supported<T>()>* = nullptr> std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> operator()( cudf::column_view const&, rmm::cuda_stream_view, rmm::mr::device_memory_resource*) { CUDF_FAIL("type not supported for minmax() operation"); } }; } // namespace std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( cudf::column_view const& col, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (col.null_count() == col.size()) { // this handles empty and all-null columns // return scalars with valid==false return {make_default_constructed_scalar(col.type(), stream, mr), make_default_constructed_scalar(col.type(), stream, mr)}; } return type_dispatcher(col.type(), minmax_functor{}, col, stream, mr); } } // namespace detail /** * @copydoc cudf::minmax */ std::pair<std::unique_ptr<scalar>, std::unique_ptr<scalar>> minmax( const column_view& col, rmm::mr::device_memory_resource* mr) { return detail::minmax(col, rmm::cuda_stream_default, mr); } } // namespace cudf
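reduce_device in both variants of this file relies on the standard two-phase DeviceReduce call: the first invocation, with a null temp-storage pointer, only reports the scratch size, and the second performs the reduction. A self-contained sketch of that pattern (buffer handling simplified, no RMM):

#include <cub/device/device_reduce.cuh>
#include <cuda_runtime.h>

template <typename T, typename Op>
void cub_reduce_sketch(const T* d_in, T* d_out, int num_items, Op op, T init,
                       cudaStream_t stream = 0)
{
    void*  d_temp     = nullptr;
    size_t temp_bytes = 0;
    // Phase 1: no work is performed; temp_bytes receives the scratch requirement.
    cub::DeviceReduce::Reduce(d_temp, temp_bytes, d_in, d_out, num_items, op, init, stream);
    cudaMalloc(&d_temp, temp_bytes);
    // Phase 2: the actual reduction runs on `stream`.
    cub::DeviceReduce::Reduce(d_temp, temp_bytes, d_in, d_out, num_items, op, init, stream);
    cudaFree(d_temp);
}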
5204268b3ee531086cbd8f9279bb29ab212f57e8.hip
// !!! This is a file automatically generated by hipify!!! /* * * Copyright (c) 2015, Facebook, Inc. All rights reserved. * * Licensed under the Creative Commons Attribution-NonCommercial 3.0 * License (the "License"). You may obtain a copy of the License at * https://creativecommons.org/licenses/by-nc/3.0/. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * */ #include <vector> #include "caffe/layer.hpp" #include "caffe/video_3d_layers.hpp" #include "caffe/util/vol2col.hpp" #include "caffe/filler.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> Dtype Convolution3DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col vol2col_gpu(bottom_data + bottom[0]->offset(n), channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_data); // Second, innerproduct with groups for (int g=0; g<filter_group_; ++g){ caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + g * weight_offset, col_data, (Dtype)0., top_data + (*top)[0]->offset(n) + g * top_offset); } // third, add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[1]->gpu_data(), reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), (Dtype)1., top_data + (*top)[0]->offset(n)); } } return Dtype(0.); } template <typename Dtype> void Convolution3DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const bool propagate_down, vector<Blob<Dtype>*>* bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[0]->gpu_data(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); // bias gradient if necessary Dtype* bias_diff = NULL; if (bias_term_) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); CUDA_CHECK(hipMemset(bias_diff, 0, sizeof(Dtype) * this->blobs_[1]->count())); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), 1., bias_diff); } } int weight_offset = M_ * K_; int top_offset = M_ * N_; CUDA_CHECK(hipMemset(weight_diff, 0, sizeof(Dtype) * this->blobs_[0]->count())); for (int n = 0; n < num_; ++n) { // since we saved memory in the forward pass by not storing all col data, // we will need to recompute them. vol2col_gpu(bottom_data + (*bottom)[0]->offset(n), channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. 
for (int g=0; g<filter_group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[0]->offset(n) + g * top_offset, col_data, (Dtype)1., weight_diff + g * weight_offset); } // gradient w.r.t. bottom data, if necessary if (propagate_down) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight, top_diff + top[0]->offset(n), (Dtype)0., col_data); for (int g=1; g<filter_group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + g * weight_offset, top_diff + top[0]->offset(n) + g * top_offset, (Dtype)1., col_data); } // col2vol back to the data col2vol_gpu(col_data, channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, bottom_diff + (*bottom)[0]->offset(n)); } } } INSTANTIATE_CLASS(Convolution3DLayer); } // namespace caffe
5204268b3ee531086cbd8f9279bb29ab212f57e8.cu
/* * * Copyright (c) 2015, Facebook, Inc. All rights reserved. * * Licensed under the Creative Commons Attribution-NonCommercial 3.0 * License (the "License"). You may obtain a copy of the License at * https://creativecommons.org/licenses/by-nc/3.0/. * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * */ #include <vector> #include "caffe/layer.hpp" #include "caffe/video_3d_layers.hpp" #include "caffe/util/vol2col.hpp" #include "caffe/filler.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> Dtype Convolution3DLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = (*top)[0]->mutable_gpu_data(); Dtype* col_data = col_buffer_.mutable_gpu_data(); const Dtype* weight = this->blobs_[0]->gpu_data(); int weight_offset = M_ * K_; int top_offset = M_ * N_; for (int n = 0; n < num_; ++n) { // First, im2col vol2col_gpu(bottom_data + bottom[0]->offset(n), channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_data); // Second, innerproduct with groups for (int g=0; g<filter_group_; ++g){ caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, K_, (Dtype)1., weight + g * weight_offset, col_data, (Dtype)0., top_data + (*top)[0]->offset(n) + g * top_offset); } // third, add bias if (bias_term_) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, num_output_, N_, 1, (Dtype)1., this->blobs_[1]->gpu_data(), reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), (Dtype)1., top_data + (*top)[0]->offset(n)); } } return Dtype(0.); } template <typename Dtype> void Convolution3DLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const bool propagate_down, vector<Blob<Dtype>*>* bottom) { const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* weight = this->blobs_[0]->gpu_data(); Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff(); const Dtype* bottom_data = (*bottom)[0]->gpu_data(); Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff(); Dtype* col_data = col_buffer_.mutable_gpu_data(); // bias gradient if necessary Dtype* bias_diff = NULL; if (bias_term_) { bias_diff = this->blobs_[1]->mutable_gpu_diff(); CUDA_CHECK(cudaMemset(bias_diff, 0, sizeof(Dtype) * this->blobs_[1]->count())); for (int n = 0; n < num_; ++n) { caffe_gpu_gemv<Dtype>(CblasNoTrans, num_output_, N_, 1., top_diff + top[0]->offset(n), reinterpret_cast<const Dtype*>(bias_multiplier_->gpu_data()), 1., bias_diff); } } int weight_offset = M_ * K_; int top_offset = M_ * N_; CUDA_CHECK(cudaMemset(weight_diff, 0, sizeof(Dtype) * this->blobs_[0]->count())); for (int n = 0; n < num_; ++n) { // since we saved memory in the forward pass by not storing all col data, // we will need to recompute them. vol2col_gpu(bottom_data + (*bottom)[0]->offset(n), channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, col_data); // gradient w.r.t. weight. Note that we will accumulate diffs. for (int g=0; g<filter_group_; ++g) { caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, M_, K_, N_, (Dtype)1., top_diff + top[0]->offset(n) + g * top_offset, col_data, (Dtype)1., weight_diff + g * weight_offset); } // gradient w.r.t. 
bottom data, if necessary if (propagate_down) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight, top_diff + top[0]->offset(n), (Dtype)0., col_data); for (int g=1; g<filter_group_; ++g) { caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans, K_, N_, M_, (Dtype)1., weight + g * weight_offset, top_diff + top[0]->offset(n) + g * top_offset, (Dtype)1., col_data); } // col2vol back to the data col2vol_gpu(col_data, channels_, length_, height_, width_, kernel_size_, kernel_depth_, pad_, temporal_pad_, stride_, temporal_stride_, bottom_diff + (*bottom)[0]->offset(n)); } } } INSTANTIATE_CLASS(Convolution3DLayer); } // namespace caffe
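In the forward pass above the bias is applied through a GEMM of shape (num_output x 1) times (1 x N) against bias_multiplier, i.e. a rank-1 broadcast of one bias value per output channel across all spatial positions. A plain host reference of that step, handy for checking the GPU path on toy sizes (names illustrative):

#include <vector>

void add_bias_reference(std::vector<float>& top,          // num_output * N, row-major
                        const std::vector<float>& bias,   // num_output entries
                        int num_output, int N)
{
    for (int c = 0; c < num_output; ++c)
        for (int n = 0; n < N; ++n)
            top[c * N + n] += bias[c];      // bias_multiplier entries are all 1
}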
2731e01beda1eb1f1994cdc6bf83a29255be6672.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_volume; int xdim0_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim0_initialise_chunk_kernel_volume; int ydim0_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim1_initialise_chunk_kernel_volume; int xdim1_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim1_initialise_chunk_kernel_volume; int ydim1_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim2_initialise_chunk_kernel_volume; int xdim2_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim2_initialise_chunk_kernel_volume; int ydim2_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim3_initialise_chunk_kernel_volume; int xdim3_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim3_initialise_chunk_kernel_volume; int ydim3_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim4_initialise_chunk_kernel_volume; int xdim4_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim4_initialise_chunk_kernel_volume; int ydim4_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim5_initialise_chunk_kernel_volume; int xdim5_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim5_initialise_chunk_kernel_volume; int ydim5_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim6_initialise_chunk_kernel_volume; int xdim6_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim6_initialise_chunk_kernel_volume; int ydim6_initialise_chunk_kernel_volume_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_initialise_chunk_kernel_volume * (y) + \ xdim0_initialise_chunk_kernel_volume * \ ydim0_initialise_chunk_kernel_volume * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_initialise_chunk_kernel_volume * (y) + \ xdim1_initialise_chunk_kernel_volume * \ ydim1_initialise_chunk_kernel_volume * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_initialise_chunk_kernel_volume * (y) + \ xdim2_initialise_chunk_kernel_volume * \ ydim2_initialise_chunk_kernel_volume * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_initialise_chunk_kernel_volume * (y) + \ xdim3_initialise_chunk_kernel_volume * \ ydim3_initialise_chunk_kernel_volume * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_initialise_chunk_kernel_volume * (y) + \ xdim4_initialise_chunk_kernel_volume * \ ydim4_initialise_chunk_kernel_volume * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_initialise_chunk_kernel_volume * (y) + \ xdim5_initialise_chunk_kernel_volume * \ ydim5_initialise_chunk_kernel_volume * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_initialise_chunk_kernel_volume * (y) + \ xdim6_initialise_chunk_kernel_volume * \ ydim6_initialise_chunk_kernel_volume * (z)) // user function __device__ void initialise_chunk_kernel_volume(double *volume, const double *celldy, double *xarea, const double *celldx, double *yarea, const double *celldz, double *zarea) { double d_x, d_y, d_z; d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells; d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells; d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells; volume[OPS_ACC0(0, 0, 0)] = d_x * d_y * d_z; xarea[OPS_ACC2(0, 0, 0)] = celldy[OPS_ACC1(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)]; yarea[OPS_ACC4(0, 0, 0)] = celldx[OPS_ACC3(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)]; zarea[OPS_ACC6(0, 0, 0)] = celldx[OPS_ACC3(0, 0, 0)] * celldy[OPS_ACC1(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef 
OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void ops_initialise_chunk_kernel_volume( double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, const double *__restrict arg3, double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_volume * ydim0_initialise_chunk_kernel_volume; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_volume + idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_volume * ydim1_initialise_chunk_kernel_volume; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_volume * ydim2_initialise_chunk_kernel_volume; arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_initialise_chunk_kernel_volume + idx_z * 0 * 1 * xdim3_initialise_chunk_kernel_volume * ydim3_initialise_chunk_kernel_volume; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim4_initialise_chunk_kernel_volume * ydim4_initialise_chunk_kernel_volume; arg5 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim5_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim5_initialise_chunk_kernel_volume * ydim5_initialise_chunk_kernel_volume; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim6_initialise_chunk_kernel_volume * ydim6_initialise_chunk_kernel_volume; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { initialise_chunk_kernel_volume(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function void ops_par_loop_initialise_chunk_kernel_volume(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { // Timing double t1, t2, c1, c2; ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 7, range, 55)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(55, "initialise_chunk_kernel_volume"); OPS_kernels[55].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; 
int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_initialise_chunk_kernel_volume_h || ydim0 != ydim0_initialise_chunk_kernel_volume_h || xdim1 != xdim1_initialise_chunk_kernel_volume_h || ydim1 != ydim1_initialise_chunk_kernel_volume_h || xdim2 != xdim2_initialise_chunk_kernel_volume_h || ydim2 != ydim2_initialise_chunk_kernel_volume_h || xdim3 != xdim3_initialise_chunk_kernel_volume_h || ydim3 != ydim3_initialise_chunk_kernel_volume_h || xdim4 != xdim4_initialise_chunk_kernel_volume_h || ydim4 != ydim4_initialise_chunk_kernel_volume_h || xdim5 != xdim5_initialise_chunk_kernel_volume_h || ydim5 != ydim5_initialise_chunk_kernel_volume_h || xdim6 != xdim6_initialise_chunk_kernel_volume_h || ydim6 != ydim6_initialise_chunk_kernel_volume_h) { hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_volume, &xdim0, sizeof(int)); xdim0_initialise_chunk_kernel_volume_h = xdim0; hipMemcpyToSymbol(ydim0_initialise_chunk_kernel_volume, &ydim0, sizeof(int)); ydim0_initialise_chunk_kernel_volume_h = ydim0; hipMemcpyToSymbol(xdim1_initialise_chunk_kernel_volume, &xdim1, sizeof(int)); xdim1_initialise_chunk_kernel_volume_h = xdim1; hipMemcpyToSymbol(ydim1_initialise_chunk_kernel_volume, &ydim1, sizeof(int)); ydim1_initialise_chunk_kernel_volume_h = ydim1; hipMemcpyToSymbol(xdim2_initialise_chunk_kernel_volume, &xdim2, sizeof(int)); xdim2_initialise_chunk_kernel_volume_h = xdim2; hipMemcpyToSymbol(ydim2_initialise_chunk_kernel_volume, &ydim2, sizeof(int)); ydim2_initialise_chunk_kernel_volume_h = ydim2; hipMemcpyToSymbol(xdim3_initialise_chunk_kernel_volume, &xdim3, sizeof(int)); xdim3_initialise_chunk_kernel_volume_h = xdim3; hipMemcpyToSymbol(ydim3_initialise_chunk_kernel_volume, &ydim3, sizeof(int)); ydim3_initialise_chunk_kernel_volume_h = ydim3; hipMemcpyToSymbol(xdim4_initialise_chunk_kernel_volume, &xdim4, sizeof(int)); xdim4_initialise_chunk_kernel_volume_h = xdim4; hipMemcpyToSymbol(ydim4_initialise_chunk_kernel_volume, &ydim4, sizeof(int)); ydim4_initialise_chunk_kernel_volume_h = ydim4; hipMemcpyToSymbol(xdim5_initialise_chunk_kernel_volume, &xdim5, sizeof(int)); xdim5_initialise_chunk_kernel_volume_h = xdim5; hipMemcpyToSymbol(ydim5_initialise_chunk_kernel_volume, &ydim5, sizeof(int)); ydim5_initialise_chunk_kernel_volume_h = ydim5; hipMemcpyToSymbol(xdim6_initialise_chunk_kernel_volume, &xdim6, sizeof(int)); xdim6_initialise_chunk_kernel_volume_h = xdim6; hipMemcpyToSymbol(ydim6_initialise_chunk_kernel_volume, &ydim6, sizeof(int)); ydim6_initialise_chunk_kernel_volume_h = ydim6; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[7]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * 
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * 
args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args, 7, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[55].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_initialise_chunk_kernel_volume), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[55].time += t1 - t2; } ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[6], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[55].mpi_time += t2 - t1; OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
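The launch shape in this host stub is the usual ceiling division, paired with the idx_x/idx_y/idx_z bounds check inside the kernel so that the padding threads do nothing. A small helper states the intent (OPS_block_size_x/y are runtime-configured by OPS; the commented lines mirror the stub above):

__host__ inline int ceil_div(int n, int block) {
    return (n + block - 1) / block;         // equals (n - 1) / block + 1 for n >= 1
}

// dim3 grid(ceil_div(x_size, OPS_block_size_x),
//           ceil_div(y_size, OPS_block_size_y),
//           z_size);
// dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);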
2731e01beda1eb1f1994cdc6bf83a29255be6672.cu
// // auto-generated by ops.py // __constant__ int xdim0_initialise_chunk_kernel_volume; int xdim0_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim0_initialise_chunk_kernel_volume; int ydim0_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim1_initialise_chunk_kernel_volume; int xdim1_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim1_initialise_chunk_kernel_volume; int ydim1_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim2_initialise_chunk_kernel_volume; int xdim2_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim2_initialise_chunk_kernel_volume; int ydim2_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim3_initialise_chunk_kernel_volume; int xdim3_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim3_initialise_chunk_kernel_volume; int ydim3_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim4_initialise_chunk_kernel_volume; int xdim4_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim4_initialise_chunk_kernel_volume; int ydim4_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim5_initialise_chunk_kernel_volume; int xdim5_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim5_initialise_chunk_kernel_volume; int ydim5_initialise_chunk_kernel_volume_h = -1; __constant__ int xdim6_initialise_chunk_kernel_volume; int xdim6_initialise_chunk_kernel_volume_h = -1; __constant__ int ydim6_initialise_chunk_kernel_volume; int ydim6_initialise_chunk_kernel_volume_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 #define OPS_ACC0(x, y, z) \ (x + xdim0_initialise_chunk_kernel_volume * (y) + \ xdim0_initialise_chunk_kernel_volume * \ ydim0_initialise_chunk_kernel_volume * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_initialise_chunk_kernel_volume * (y) + \ xdim1_initialise_chunk_kernel_volume * \ ydim1_initialise_chunk_kernel_volume * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_initialise_chunk_kernel_volume * (y) + \ xdim2_initialise_chunk_kernel_volume * \ ydim2_initialise_chunk_kernel_volume * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_initialise_chunk_kernel_volume * (y) + \ xdim3_initialise_chunk_kernel_volume * \ ydim3_initialise_chunk_kernel_volume * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_initialise_chunk_kernel_volume * (y) + \ xdim4_initialise_chunk_kernel_volume * \ ydim4_initialise_chunk_kernel_volume * (z)) #define OPS_ACC5(x, y, z) \ (x + xdim5_initialise_chunk_kernel_volume * (y) + \ xdim5_initialise_chunk_kernel_volume * \ ydim5_initialise_chunk_kernel_volume * (z)) #define OPS_ACC6(x, y, z) \ (x + xdim6_initialise_chunk_kernel_volume * (y) + \ xdim6_initialise_chunk_kernel_volume * \ ydim6_initialise_chunk_kernel_volume * (z)) // user function __device__ void initialise_chunk_kernel_volume(double *volume, const double *celldy, double *xarea, const double *celldx, double *yarea, const double *celldz, double *zarea) { double d_x, d_y, d_z; d_x = (grid.xmax - grid.xmin) / (double)grid.x_cells; d_y = (grid.ymax - grid.ymin) / (double)grid.y_cells; d_z = (grid.zmax - grid.zmin) / (double)grid.z_cells; volume[OPS_ACC0(0, 0, 0)] = d_x * d_y * d_z; xarea[OPS_ACC2(0, 0, 0)] = celldy[OPS_ACC1(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)]; yarea[OPS_ACC4(0, 0, 0)] = celldx[OPS_ACC3(0, 0, 0)] * celldz[OPS_ACC5(0, 0, 0)]; zarea[OPS_ACC6(0, 0, 0)] = celldx[OPS_ACC3(0, 0, 0)] * celldy[OPS_ACC1(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #undef OPS_ACC6 __global__ void 
ops_initialise_chunk_kernel_volume( double *__restrict arg0, const double *__restrict arg1, double *__restrict arg2, const double *__restrict arg3, double *__restrict arg4, const double *__restrict arg5, double *__restrict arg6, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim0_initialise_chunk_kernel_volume * ydim0_initialise_chunk_kernel_volume; arg1 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim1_initialise_chunk_kernel_volume + idx_z * 0 * 1 * xdim1_initialise_chunk_kernel_volume * ydim1_initialise_chunk_kernel_volume; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim2_initialise_chunk_kernel_volume * ydim2_initialise_chunk_kernel_volume; arg3 += idx_x * 1 * 1 + idx_y * 0 * 1 * xdim3_initialise_chunk_kernel_volume + idx_z * 0 * 1 * xdim3_initialise_chunk_kernel_volume * ydim3_initialise_chunk_kernel_volume; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim4_initialise_chunk_kernel_volume * ydim4_initialise_chunk_kernel_volume; arg5 += idx_x * 0 * 1 + idx_y * 0 * 1 * xdim5_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim5_initialise_chunk_kernel_volume * ydim5_initialise_chunk_kernel_volume; arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_initialise_chunk_kernel_volume + idx_z * 1 * 1 * xdim6_initialise_chunk_kernel_volume * ydim6_initialise_chunk_kernel_volume; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { initialise_chunk_kernel_volume(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function void ops_par_loop_initialise_chunk_kernel_volume(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { // Timing double t1, t2, c1, c2; ops_arg args[7] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 7, range, 55)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(55, "initialise_chunk_kernel_volume"); OPS_kernels[55].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = 
args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; int xdim5 = args[5].dat->size[0]; int ydim5 = args[5].dat->size[1]; int xdim6 = args[6].dat->size[0]; int ydim6 = args[6].dat->size[1]; if (xdim0 != xdim0_initialise_chunk_kernel_volume_h || ydim0 != ydim0_initialise_chunk_kernel_volume_h || xdim1 != xdim1_initialise_chunk_kernel_volume_h || ydim1 != ydim1_initialise_chunk_kernel_volume_h || xdim2 != xdim2_initialise_chunk_kernel_volume_h || ydim2 != ydim2_initialise_chunk_kernel_volume_h || xdim3 != xdim3_initialise_chunk_kernel_volume_h || ydim3 != ydim3_initialise_chunk_kernel_volume_h || xdim4 != xdim4_initialise_chunk_kernel_volume_h || ydim4 != ydim4_initialise_chunk_kernel_volume_h || xdim5 != xdim5_initialise_chunk_kernel_volume_h || ydim5 != ydim5_initialise_chunk_kernel_volume_h || xdim6 != xdim6_initialise_chunk_kernel_volume_h || ydim6 != ydim6_initialise_chunk_kernel_volume_h) { cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_volume, &xdim0, sizeof(int)); xdim0_initialise_chunk_kernel_volume_h = xdim0; cudaMemcpyToSymbol(ydim0_initialise_chunk_kernel_volume, &ydim0, sizeof(int)); ydim0_initialise_chunk_kernel_volume_h = ydim0; cudaMemcpyToSymbol(xdim1_initialise_chunk_kernel_volume, &xdim1, sizeof(int)); xdim1_initialise_chunk_kernel_volume_h = xdim1; cudaMemcpyToSymbol(ydim1_initialise_chunk_kernel_volume, &ydim1, sizeof(int)); ydim1_initialise_chunk_kernel_volume_h = ydim1; cudaMemcpyToSymbol(xdim2_initialise_chunk_kernel_volume, &xdim2, sizeof(int)); xdim2_initialise_chunk_kernel_volume_h = xdim2; cudaMemcpyToSymbol(ydim2_initialise_chunk_kernel_volume, &ydim2, sizeof(int)); ydim2_initialise_chunk_kernel_volume_h = ydim2; cudaMemcpyToSymbol(xdim3_initialise_chunk_kernel_volume, &xdim3, sizeof(int)); xdim3_initialise_chunk_kernel_volume_h = xdim3; cudaMemcpyToSymbol(ydim3_initialise_chunk_kernel_volume, &ydim3, sizeof(int)); ydim3_initialise_chunk_kernel_volume_h = ydim3; cudaMemcpyToSymbol(xdim4_initialise_chunk_kernel_volume, &xdim4, sizeof(int)); xdim4_initialise_chunk_kernel_volume_h = xdim4; cudaMemcpyToSymbol(ydim4_initialise_chunk_kernel_volume, &ydim4, sizeof(int)); ydim4_initialise_chunk_kernel_volume_h = ydim4; cudaMemcpyToSymbol(xdim5_initialise_chunk_kernel_volume, &xdim5, sizeof(int)); xdim5_initialise_chunk_kernel_volume_h = xdim5; cudaMemcpyToSymbol(ydim5_initialise_chunk_kernel_volume, &ydim5, sizeof(int)); ydim5_initialise_chunk_kernel_volume_h = ydim5; cudaMemcpyToSymbol(xdim6_initialise_chunk_kernel_volume, &xdim6, sizeof(int)); xdim6_initialise_chunk_kernel_volume_h = xdim6; cudaMemcpyToSymbol(ydim6_initialise_chunk_kernel_volume, &ydim6, sizeof(int)); ydim6_initialise_chunk_kernel_volume_h = ydim6; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; int dat6 = args[6].dat->elem_size; char *p_a[7]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); 
base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2] - args[4].dat->base[2] - d_m[2]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5 + dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] * (start[2] * args[5].stencil->stride[2] - args[5].dat->base[2] - d_m[2]); p_a[5] = (char *)args[5].data_d + base5; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d] + OPS_sub_dat_list[args[6].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[6].dat->d_m[d]; #endif int base6 = dat6 * 1 * (start[0] * args[6].stencil->stride[0] - args[6].dat->base[0] - d_m[0]); base6 = base6 + dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1] - args[6].dat->base[1] - 
d_m[1]); base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] * (start[2] * args[6].stencil->stride[2] - args[6].dat->base[2] - d_m[2]); p_a[6] = (char *)args[6].data_d + base6; ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args, 7, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[55].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_initialise_chunk_kernel_volume<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (double *)p_a[6], x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[55].time += t1 - t2; } ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[2], range); ops_set_halo_dirtybit3(&args[4], range); ops_set_halo_dirtybit3(&args[6], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[55].mpi_time += t2 - t1; OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg5); OPS_kernels[55].transfer += ops_compute_transfer(dim, start, end, &arg6); } }
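Both versions of this generated loop cache the grid extents in __constant__ memory and keep a host-side shadow (the *_h variables) so that the symbol copy is issued only when a dimension actually changes between calls. The pattern in isolation (names hypothetical):

#include <cuda_runtime.h>

__constant__ int xdim_example;          // read on the device, e.g. inside indexing macros
static int xdim_example_h = -1;         // host shadow; -1 means "not uploaded yet"

inline void update_xdim(int xdim) {
    if (xdim != xdim_example_h) {
        cudaMemcpyToSymbol(xdim_example, &xdim, sizeof(int));
        xdim_example_h = xdim;          // skip the copy on later calls if unchanged
    }
}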
4b964d103e4ab91cfec208b8936e1119a3d633bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../hTypes.h" #define GAUSSIAN 4096 #define UNIFORM 24 #define NONE 0 namespace Helix { template<typename F> __device__ inline F getRandomSeed(F type, hiprandState_t *state) { switch ((int)type){ case GAUSSIAN: return (typeid(F) == typeid(float) ? hiprand_normal(state) : hiprand_normal_double(state)); case UNIFORM: return (typeid(F) == typeid(float) ? hiprand_uniform(state) : hiprand_uniform_double(state)); } } template<typename F> __global__ void generateRandomParticles(F4<F> *particles, F4<F> *limits, hiprandState_t *states, int *strides, int offset) { int idx = blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t state = states[idx]; for (int i = 0; i < offset; i++) { int distribution = (int) limits->w; F dl = limits->x - limits.y + 0.999999; } for (int i = 0, p = 0; i < sizeof(strides) / sizeof(strides[0]); i++, p += 3) { int distribution = (int) limits[p + 2]; int elements = strides[i]; F dl = limits [p] - limits[p + 1] + 0.999999; if (distribution == NONE) idx += strides[i]; continue; for (int q = 0; q < elements; q++) { particles[idx] = (getRandomSeed<F>(distribution, state) * dl) + limits[p + 1]; idx++; } } } template<typename F> __global__ void generateDensityParticles(F *particles, UniSimFmt<F> *limits, hiprandState_t *states) { int idx = blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t state = states[idx]; } template<typename F> void densityParticleGeneration(UniSimFmt<F> *limits, F *_particles, F *_dParticles, const int n, bool localCpy = false) { hiprandState_t *states = malloc(sizeof(hiprandState_t) * n); hiprandState_t *dStates; hipMalloc(&dStates, sizeof(states)); hipMemcpy(dStates, states, sizeof(states), hipMemcpyHostToDevice); F *limArr = limits->toCudaFmt(); F *dLimits = cudaAlloCopy<F>(limArr, sizeof(limArr)); _dParticles = cudaAlloCopy<F>(_particles, sizeof(_particles)); } template<typename F> void distributionGeneration(F4<F> *_particles, F4<F> *_dParticles, GenerationLimits<F4<F>> *limits, int nParticles, dim3 *blocks, dim3 *threads, int offset, bool localCpy = false) { hiprandState_t *states = malloc(sizeof(hiprandState_t) * nParticles); hiprandState_t *dStates; hipMalloc (&dStates, sizeof(states)); hipMemcpy (dStates, states, allocationSize, hipMemcpyHostToDevice); F *dLimits = cudaAlloCopy<F> (_limits, sizeof(limits->vec)); int *dStrides = cudaAlloCopy<int> (_strides, sizeof(_strides)); hipLaunchKernelGGL(( generateRandomParticles<F>), dim3(*blocks), dim3(*threads), 0, 0, _dParticles, dLimits, dStates, dStrides, offset); if (localCpy) { hipMemcpy(_particles, _dParticles, size, hipMemcpyDeviceToHost); hipFree(_dParticles); } hipFree(dLimits); hipFree(dStrides); hipFree(dStates); delete states; } template void distributionGeneration<float> (float *, float *, float *, int, dim3 *, dim3 *, bool); template void distributionGeneration<double> (double *, double *, double *, int, dim3 *, dim3 *, bool); }
4b964d103e4ab91cfec208b8936e1119a3d633bb.cu
#include "cuda.h" #include "../hTypes.h" #define GAUSSIAN 4096 #define UNIFORM 24 #define NONE 0 namespace Helix { template<typename F> __device__ inline F getRandomSeed(F type, curandState *state) { switch ((int)type){ case GAUSSIAN: return (typeid(F) == typeid(float) ? curand_normal(state) : curand_normal_double(state)); case UNIFORM: return (typeid(F) == typeid(float) ? curand_uniform(state) : curand_uniform_double(state)); } } template<typename F> __global__ void generateRandomParticles(F4<F> *particles, F4<F> *limits, curandState *states, int *strides, int offset) { int idx = blockDim.x * blockIdx.x + threadIdx.x; curandState state = states[idx]; for (int i = 0; i < offset; i++) { int distribution = (int) limits->w; F dl = limits->x - limits.y + 0.999999; } for (int i = 0, p = 0; i < sizeof(strides) / sizeof(strides[0]); i++, p += 3) { int distribution = (int) limits[p + 2]; int elements = strides[i]; F dl = limits [p] - limits[p + 1] + 0.999999; if (distribution == NONE) idx += strides[i]; continue; for (int q = 0; q < elements; q++) { particles[idx] = (getRandomSeed<F>(distribution, state) * dl) + limits[p + 1]; idx++; } } } template<typename F> __global__ void generateDensityParticles(F *particles, UniSimFmt<F> *limits, curandState *states) { int idx = blockDim.x * blockIdx.x + threadIdx.x; curandState state = states[idx]; } template<typename F> void densityParticleGeneration(UniSimFmt<F> *limits, F *_particles, F *_dParticles, const int n, bool localCpy = false) { curandState *states = malloc(sizeof(curandState) * n); curandState *dStates; cudaMalloc(&dStates, sizeof(states)); cudaMemcpy(dStates, states, sizeof(states), cudaMemcpyHostToDevice); F *limArr = limits->toCudaFmt(); F *dLimits = cudaAlloCopy<F>(limArr, sizeof(limArr)); _dParticles = cudaAlloCopy<F>(_particles, sizeof(_particles)); } template<typename F> void distributionGeneration(F4<F> *_particles, F4<F> *_dParticles, GenerationLimits<F4<F>> *limits, int nParticles, dim3 *blocks, dim3 *threads, int offset, bool localCpy = false) { curandState *states = malloc(sizeof(curandState) * nParticles); curandState *dStates; cudaMalloc (&dStates, sizeof(states)); cudaMemcpy (dStates, states, allocationSize, cudaMemcpyHostToDevice); F *dLimits = cudaAlloCopy<F> (_limits, sizeof(limits->vec)); int *dStrides = cudaAlloCopy<int> (_strides, sizeof(_strides)); generateRandomParticles<F><<<*blocks, *threads>>>(_dParticles, dLimits, dStates, dStrides, offset); if (localCpy) { cudaMemcpy(_particles, _dParticles, size, cudaMemcpyDeviceToHost); cudaFree(_dParticles); } cudaFree(dLimits); cudaFree(dStrides); cudaFree(dStates); delete states; } template void distributionGeneration<float> (float *, float *, float *, int, dim3 *, dim3 *, bool); template void distributionGeneration<double> (double *, double *, double *, int, dim3 *, dim3 *, bool); }
4aaebbffcfcb91168dd7fec0914c4969e563d02a.hip
// !!! This is a file automatically generated by hipify!!! #include "block.h" #include "cuda_common.h" #include <hip/hip_runtime.h> __constant__ double a_t; __constant__ double L[3]; __constant__ double bshift[3]; __constant__ int bsize[3]; __constant__ int bmin[3]; __device__ double d_bu(double x, double y, double z, double t) { return sin((M_PI / L[0]) * x) * sin((M_PI / L[1]) * y) * sin((M_PI / L[2]) * z) * cos(a_t * t);; } __global__ void g_calcErrorK(double* d_error, double* block, double t) { extern __shared__ double sdata[]; int i = blockIdx.z + 1; int j = blockIdx.y + 1; int k = threadIdx.x + 1; int tid = threadIdx.x; int ind = i * (bsize[1] * bsize[2]) + j * bsize[2] + k; sdata[tid] = fabs(block[ind] - d_bu((i + bmin[0]) * bshift[0], (j + bmin[1]) * bshift[1], (k + bmin[2]) * bshift[2], t)); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_error[blockIdx.z * gridDim.y + blockIdx.y] = sdata[0]; } __global__ void g_calcErrorJ(double* d_errorJ, double* d_errorK) { extern __shared__ double sdata[]; int tid = threadIdx.x; int ind = blockDim.x * blockIdx.y + threadIdx.x; sdata[tid] = d_errorK[ind]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_errorJ[blockIdx.y] = sdata[0]; } __global__ void g_calcErrorI(double* d_errorJ) { extern __shared__ double sdata[]; int tid = threadIdx.x; sdata[tid] = d_errorJ[tid]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_errorJ[0] = sdata[0]; } __global__ void g_getSlice(double* block, double* slice, int axis, int item) { int ijk[3]; ijk[(axis + 2) % 3] = threadIdx.x + 1; ijk[(axis + 1) % 3] = blockIdx.y + 1; ijk[axis] = item; slice[threadIdx.x * gridDim.y + blockIdx.y] = block[ijk[0] * (bsize[1] * bsize[2]) + ijk[1] * bsize[2] + ijk[2]]; } __global__ void g_setSlice(double* block, double* slice, int axis, int item) { int ijk[3]; ijk[(axis + 2) % 3] = threadIdx.x + 1; ijk[(axis + 1) % 3] = blockIdx.y + 1; ijk[axis] = item; block[ijk[0] * (bsize[1] * bsize[2]) + ijk[1] * bsize[2] + ijk[2]] = slice[threadIdx.x * gridDim.y + blockIdx.y]; } Block::Block(std::vector<int>& size, std::vector<int>& min, std::vector<double>& shift) : _size({size[0] + 2, size[1] + 2, size[2] + 2}), _min(min), _shift(shift) { _size = {size[0] + 2, size[1] + 2, size[2] + 2}; _shift = shift; _raw.resize(_size[0] * _size[1] * _size[2], 0.0); } void Block::init_cuda() { int max_ssize = ::max((_size[0] - 2) * (_size[1] - 2), ::max((_size[0] - 2) * (_size[2] - 2), (_size[1] - 2) * (_size[2] - 2))); SAFE_CALL(hipMalloc((void**)&_d_raw, sizeof(double) * _size[0] * _size[1] * _size[2])); SAFE_CALL(hipMalloc((void**)&_d_errorK, sizeof(double) * (_size[0] - 2) * (_size[1] - 2))); SAFE_CALL(hipMalloc((void**)&_d_errorJ, sizeof(double) * (_size[0] - 2))); SAFE_CALL(hipMalloc((void**)&_d_slice, sizeof(double) * max_ssize)); } void Block::destroy_cuda() { hipFree(_d_raw); hipFree(_d_errorK); hipFree(_d_errorJ); hipFree(_d_slice); } //////////////////////////////////////////////////////////// //TODO void Block::printBlock() const { return; }; void Block::printDiff(Function3D &u, double t) const { return; }; void Block::saveBlock(std::string &str) const { return; }; //////////////////////////////////////////////////////////// double 
Block::getError(Function3D &u, double t) const { double h_a_t = u.a_t(); std::vector<double> h_L = u.getL();; double h_error; dim3 grid; dim3 block; checkCudaErrors(); SAFE_CALL(hipMemcpyToSymbol(a_t, &h_a_t, sizeof(double))); SAFE_CALL(hipMemcpyToSymbol(L, h_L.data(), sizeof(double) * 3)); SAFE_CALL(hipMemcpyToSymbol(bshift, _shift.data(), sizeof(double) * 3)); SAFE_CALL(hipMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); SAFE_CALL(hipMemcpyToSymbol(bmin, _min.data(), sizeof(int) * 3)); grid = dim3(1, (_size[1] - 2), (_size[0] - 2)); block = dim3(_size[2] - 2, 1, 1); hipLaunchKernelGGL(( g_calcErrorK), dim3(grid), dim3(block), block.x * sizeof(double), 0, _d_errorK, _d_raw, t); checkCudaErrors(); grid = dim3(1, (_size[0] - 2), 1); block = dim3(_size[1] - 2, 1, 1); hipLaunchKernelGGL(( g_calcErrorJ), dim3(grid), dim3(block), block.x * sizeof(double), 0, _d_errorJ, _d_errorK); checkCudaErrors(); grid = dim3(1, 1, 1); block = dim3(_size[0] - 2, 1, 1); hipLaunchKernelGGL(( g_calcErrorI), dim3(grid), dim3(block), block.x * sizeof(double), 0, _d_errorJ); checkCudaErrors(); SAFE_CALL(hipMemcpy(&h_error, _d_errorJ, sizeof(double), hipMemcpyDeviceToHost)); return h_error; } double& Block::getElem(int i, int j, int k) { return _raw[i * (_size[1] * _size[2]) + j * _size[2] + k]; } double Block::getValElem(int i, int j, int k) const { return _raw[i * (_size[1] * _size[2]) + j * _size[2] + k]; } std::vector<double> Block::getSlice(int axis, int item) const { int ssize = (_size[(axis + 1) % 3] - 2) * (_size[(axis + 2) % 3] - 2); SAFE_CALL(hipMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); dim3 grid = dim3(1, _size[(axis + 1) % 3] - 2, 1); dim3 block = dim3(_size[(axis + 2) % 3] - 2, 1, 1); hipLaunchKernelGGL(( g_getSlice), dim3(grid), dim3(block), 0, 0, _d_raw, _d_slice, axis, item); std::vector<double> h_slice (ssize, 0); SAFE_CALL(hipMemcpy(h_slice.data(), _d_slice, sizeof(double) * ssize, hipMemcpyDeviceToHost)); return h_slice; }; void Block::setSlice(const std::vector<double>& slice, int axis, int item) { int ssize = slice.size(); SAFE_CALL(hipMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); SAFE_CALL(hipMemcpy(_d_slice, slice.data(), sizeof(double) * ssize, hipMemcpyHostToDevice)); dim3 grid = dim3(1, _size[(axis + 1) % 3] - 2, 1); dim3 block = dim3(_size[(axis + 2) % 3] - 2, 1, 1); hipLaunchKernelGGL(( g_setSlice), dim3(grid), dim3(block), 0, 0, _d_raw, _d_slice, axis, item); } double Block::lap_h(int i, int j, int k) const { return (getValElem(i - 1, j, k) - 2 * getValElem(i, j, k) + getValElem(i + 1, j, k)) / pow(_shift[0], 2) + (getValElem(i, j - 1, k) - 2 * getValElem(i, j, k) + getValElem(i, j + 1, k)) / pow(_shift[1], 2) + (getValElem(i, j, k - 1) - 2 * getValElem(i, j, k) + getValElem(i, j, k + 1)) / pow(_shift[2], 2); }
4aaebbffcfcb91168dd7fec0914c4969e563d02a.cu
#include "block.h" #include "cuda_common.h" #include <cuda.h> __constant__ double a_t; __constant__ double L[3]; __constant__ double bshift[3]; __constant__ int bsize[3]; __constant__ int bmin[3]; __device__ double d_bu(double x, double y, double z, double t) { return sin((M_PI / L[0]) * x) * sin((M_PI / L[1]) * y) * sin((M_PI / L[2]) * z) * cos(a_t * t);; } __global__ void g_calcErrorK(double* d_error, double* block, double t) { extern __shared__ double sdata[]; int i = blockIdx.z + 1; int j = blockIdx.y + 1; int k = threadIdx.x + 1; int tid = threadIdx.x; int ind = i * (bsize[1] * bsize[2]) + j * bsize[2] + k; sdata[tid] = fabs(block[ind] - d_bu((i + bmin[0]) * bshift[0], (j + bmin[1]) * bshift[1], (k + bmin[2]) * bshift[2], t)); __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_error[blockIdx.z * gridDim.y + blockIdx.y] = sdata[0]; } __global__ void g_calcErrorJ(double* d_errorJ, double* d_errorK) { extern __shared__ double sdata[]; int tid = threadIdx.x; int ind = blockDim.x * blockIdx.y + threadIdx.x; sdata[tid] = d_errorK[ind]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_errorJ[blockIdx.y] = sdata[0]; } __global__ void g_calcErrorI(double* d_errorJ) { extern __shared__ double sdata[]; int tid = threadIdx.x; sdata[tid] = d_errorJ[tid]; __syncthreads(); for (unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { sdata[tid] = fmax(sdata[tid], sdata[tid + s]); } __syncthreads(); } if (tid == 0) d_errorJ[0] = sdata[0]; } __global__ void g_getSlice(double* block, double* slice, int axis, int item) { int ijk[3]; ijk[(axis + 2) % 3] = threadIdx.x + 1; ijk[(axis + 1) % 3] = blockIdx.y + 1; ijk[axis] = item; slice[threadIdx.x * gridDim.y + blockIdx.y] = block[ijk[0] * (bsize[1] * bsize[2]) + ijk[1] * bsize[2] + ijk[2]]; } __global__ void g_setSlice(double* block, double* slice, int axis, int item) { int ijk[3]; ijk[(axis + 2) % 3] = threadIdx.x + 1; ijk[(axis + 1) % 3] = blockIdx.y + 1; ijk[axis] = item; block[ijk[0] * (bsize[1] * bsize[2]) + ijk[1] * bsize[2] + ijk[2]] = slice[threadIdx.x * gridDim.y + blockIdx.y]; } Block::Block(std::vector<int>& size, std::vector<int>& min, std::vector<double>& shift) : _size({size[0] + 2, size[1] + 2, size[2] + 2}), _min(min), _shift(shift) { _size = {size[0] + 2, size[1] + 2, size[2] + 2}; _shift = shift; _raw.resize(_size[0] * _size[1] * _size[2], 0.0); } void Block::init_cuda() { int max_ssize = std::max((_size[0] - 2) * (_size[1] - 2), std::max((_size[0] - 2) * (_size[2] - 2), (_size[1] - 2) * (_size[2] - 2))); SAFE_CALL(cudaMalloc((void**)&_d_raw, sizeof(double) * _size[0] * _size[1] * _size[2])); SAFE_CALL(cudaMalloc((void**)&_d_errorK, sizeof(double) * (_size[0] - 2) * (_size[1] - 2))); SAFE_CALL(cudaMalloc((void**)&_d_errorJ, sizeof(double) * (_size[0] - 2))); SAFE_CALL(cudaMalloc((void**)&_d_slice, sizeof(double) * max_ssize)); } void Block::destroy_cuda() { cudaFree(_d_raw); cudaFree(_d_errorK); cudaFree(_d_errorJ); cudaFree(_d_slice); } //////////////////////////////////////////////////////////// //TODO void Block::printBlock() const { return; }; void Block::printDiff(Function3D &u, double t) const { return; }; void Block::saveBlock(std::string &str) const { return; }; //////////////////////////////////////////////////////////// double Block::getError(Function3D &u, double t) const { double h_a_t = u.a_t(); 
std::vector<double> h_L = u.getL();; double h_error; dim3 grid; dim3 block; checkCudaErrors(); SAFE_CALL(cudaMemcpyToSymbol(a_t, &h_a_t, sizeof(double))); SAFE_CALL(cudaMemcpyToSymbol(L, h_L.data(), sizeof(double) * 3)); SAFE_CALL(cudaMemcpyToSymbol(bshift, _shift.data(), sizeof(double) * 3)); SAFE_CALL(cudaMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); SAFE_CALL(cudaMemcpyToSymbol(bmin, _min.data(), sizeof(int) * 3)); grid = dim3(1, (_size[1] - 2), (_size[0] - 2)); block = dim3(_size[2] - 2, 1, 1); g_calcErrorK<<<grid, block, block.x * sizeof(double)>>>(_d_errorK, _d_raw, t); checkCudaErrors(); grid = dim3(1, (_size[0] - 2), 1); block = dim3(_size[1] - 2, 1, 1); g_calcErrorJ<<<grid, block, block.x * sizeof(double)>>>(_d_errorJ, _d_errorK); checkCudaErrors(); grid = dim3(1, 1, 1); block = dim3(_size[0] - 2, 1, 1); g_calcErrorI<<<grid, block, block.x * sizeof(double)>>>(_d_errorJ); checkCudaErrors(); SAFE_CALL(cudaMemcpy(&h_error, _d_errorJ, sizeof(double), cudaMemcpyDeviceToHost)); return h_error; } double& Block::getElem(int i, int j, int k) { return _raw[i * (_size[1] * _size[2]) + j * _size[2] + k]; } double Block::getValElem(int i, int j, int k) const { return _raw[i * (_size[1] * _size[2]) + j * _size[2] + k]; } std::vector<double> Block::getSlice(int axis, int item) const { int ssize = (_size[(axis + 1) % 3] - 2) * (_size[(axis + 2) % 3] - 2); SAFE_CALL(cudaMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); dim3 grid = dim3(1, _size[(axis + 1) % 3] - 2, 1); dim3 block = dim3(_size[(axis + 2) % 3] - 2, 1, 1); g_getSlice<<<grid, block>>>(_d_raw, _d_slice, axis, item); std::vector<double> h_slice (ssize, 0); SAFE_CALL(cudaMemcpy(h_slice.data(), _d_slice, sizeof(double) * ssize, cudaMemcpyDeviceToHost)); return h_slice; }; void Block::setSlice(const std::vector<double>& slice, int axis, int item) { int ssize = slice.size(); SAFE_CALL(cudaMemcpyToSymbol(bsize, _size.data(), sizeof(int) * 3)); SAFE_CALL(cudaMemcpy(_d_slice, slice.data(), sizeof(double) * ssize, cudaMemcpyHostToDevice)); dim3 grid = dim3(1, _size[(axis + 1) % 3] - 2, 1); dim3 block = dim3(_size[(axis + 2) % 3] - 2, 1, 1); g_setSlice<<<grid, block>>>(_d_raw, _d_slice, axis, item); } double Block::lap_h(int i, int j, int k) const { return (getValElem(i - 1, j, k) - 2 * getValElem(i, j, k) + getValElem(i + 1, j, k)) / pow(_shift[0], 2) + (getValElem(i, j - 1, k) - 2 * getValElem(i, j, k) + getValElem(i, j + 1, k)) / pow(_shift[1], 2) + (getValElem(i, j, k - 1) - 2 * getValElem(i, j, k) + getValElem(i, j, k + 1)) / pow(_shift[2], 2); }
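Block::getError above folds the per-point error down with three shared-memory max-reduction kernels (over k, then j, then i). Below is a stand-alone sketch of that reduction pattern, collapsed to one kernel plus a final host pass; it assumes a power-of-two block size just as the originals do, and the names (maxReduce and so on) are ours, not the project's.

// Stand-alone shared-memory max reduction in the style of the g_calcError* kernels.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void maxReduce(const double *in, double *out, int n) {
  extern __shared__ double sdata[];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  sdata[tid] = (i < n) ? in[i] : 0.0;           // pad the tail with 0 (errors are non-negative)
  __syncthreads();
  for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {  // power-of-two blockDim assumed
    if (tid < s) sdata[tid] = fmax(sdata[tid], sdata[tid + s]);
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = sdata[0];     // one partial max per block
}

int main() {
  const int n = 1 << 16, block = 256, grid = (n + block - 1) / block;
  double *h = new double[n], *hOut = new double[grid], *dIn, *dOut;
  for (int i = 0; i < n; ++i) h[i] = (i == 12345) ? 42.0 : 1.0;
  cudaMalloc(&dIn, n * sizeof(double));
  cudaMalloc(&dOut, grid * sizeof(double));
  cudaMemcpy(dIn, h, n * sizeof(double), cudaMemcpyHostToDevice);
  maxReduce<<<grid, block, block * sizeof(double)>>>(dIn, dOut, n);
  cudaMemcpy(hOut, dOut, grid * sizeof(double), cudaMemcpyDeviceToHost);
  double m = 0.0;
  for (int b = 0; b < grid; ++b) m = (hOut[b] > m) ? hOut[b] : m;  // final pass on the host
  printf("max = %f\n", m);                                         // prints 42.0
  cudaFree(dIn); cudaFree(dOut); delete[] h; delete[] hOut;
  return 0;
}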
979e2cb7dd885fda5c1f9832c27713fd3f57498b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Portions Copyright (c) 1993-2015 NVIDIA Corporation. All rights reserved. * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * Portions Copyright (c) 2009 Mike Giles, Oxford University. All rights reserved. * Portions Copyright (c) 2008 Frances Y. Kuo and Stephen Joe. All rights reserved. * * Sobol Quasi-random Number Generator example * * Based on CUDA code submitted by Mike Giles, Oxford University, United Kingdom * http://people.maths.ox.ac.uk/~gilesm/ * * and C code developed by Stephen Joe, University of Waikato, New Zealand * and Frances Kuo, University of New South Wales, Australia * http://web.maths.unsw.edu.au/~fkuo/sobol/ * * For theoretical background see: * * P. Bratley and B.L. Fox. * Implementing Sobol's quasirandom sequence generator * http://portal.acm.org/citation.cfm?id=42288 * ACM Trans. on Math. Software, 14(1):88-100, 1988 * * S. Joe and F. Kuo. * Remark on algorithm 659: implementing Sobol's quasirandom sequence generator. * http://portal.acm.org/citation.cfm?id=641879 * ACM Trans. on Math. Software, 29(1):49-57, 2003 * */ #include "sobol.h" #include "sobol_gpu.h" #include <helper_cuda.h> #define k_2powneg32 2.3283064E-10F __global__ void sobolGPU_kernel(unsigned n_vectors, unsigned n_dimensions, unsigned *d_directions, float *d_output) { __shared__ unsigned int v[n_directions]; // Offset into the correct dimension as specified by the // block y coordinate d_directions = d_directions + n_directions * blockIdx.y; d_output = d_output + n_vectors * blockIdx.y; // Copy the direction numbers for this dimension into shared // memory - there are only 32 direction numbers so only the // first 32 (n_directions) threads need participate. if (threadIdx.x < n_directions) { v[threadIdx.x] = d_directions[threadIdx.x]; } __syncthreads(); // Set initial index (i.e. which vector this thread is // computing first) and stride (i.e. step to the next vector // for this thread) int i0 = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; // Get the gray code of the index // c.f. Numerical Recipes in C, chapter 20 // http://www.nrbook.com/a/bookcpdf/c20-2.pdf unsigned int g = i0 ^ (i0 >> 1); // Initialisation for first point x[i0] // In the Bratley and Fox paper this is equation (*), where // we are computing the value for x[n] without knowing the // value of x[n-1]. unsigned int X = 0; unsigned int mask; for (unsigned int k = 0 ; k < __ffs(stride) - 1 ; k++) { // We want X ^= g_k * v[k], where g_k is one or zero. // We do this by setting a mask with all bits equal to // g_k. In reality we keep shifting g so that g_k is the // LSB of g. This way we avoid multiplication. mask = - (g & 1); X ^= mask & v[k]; g = g >> 1; } if (i0 < n_vectors) { d_output[i0] = (float)X * k_2powneg32; } // Now do rest of points, using the stride // Here we want to generate x[i] from x[i-stride] where we // don't have any of the x in between, therefore we have to // revisit the equation (**), this is easiest with an example // so assume stride is 16. 
// From x[n] to x[n+16] there will be: // 8 changes in the first bit // 4 changes in the second bit // 2 changes in the third bit // 1 change in the fourth // 1 change in one of the remaining bits // // What this means is that in the equation: // x[n+1] = x[n] ^ v[p] // x[n+2] = x[n+1] ^ v[q] = x[n] ^ v[p] ^ v[q] // ... // We will apply xor with v[1] eight times, v[2] four times, // v[3] twice, v[4] once and one other direction number once. // Since two xors cancel out, we can skip even applications // and just apply xor with v[4] (i.e. log2(16)) and with // the current applicable direction number. // Note that all these indices count from 1, so we need to // subtract 1 from them all to account for C arrays counting // from zero. unsigned int v_log2stridem1 = v[__ffs(stride) - 2]; unsigned int v_stridemask = stride - 1; for (unsigned int i = i0 + stride ; i < n_vectors ; i += stride) { // x[i] = x[i-stride] ^ v[b] ^ v[c] // where b is log2(stride) minus 1 for C array indexing // where c is the index of the rightmost zero bit in i, // not including the bottom log2(stride) bits, minus 1 // for C array indexing // In the Bratley and Fox paper this is equation (**) X ^= v_log2stridem1 ^ v[__ffs(~((i - stride) | v_stridemask)) - 1]; d_output[i] = (float)X * k_2powneg32; } } extern "C" void sobolGPU(int n_vectors, int n_dimensions, unsigned int *d_directions, float *d_output) { //const int threadsperblock = 64; //const int threadsperblock = 32; //const int threadsperblock = 128; //const int threadsperblock = 256; //const int threadsperblock = 512; const int threadsperblock = 1024; // Set up the execution configuration dim3 dimGrid; dim3 dimBlock; int device; hipDeviceProp_t prop; checkCudaErrors(hipGetDevice(&device)); checkCudaErrors(hipGetDeviceProperties(&prop, device)); // This implementation of the generator outputs all the draws for // one dimension in a contiguous region of memory, followed by the // next dimension and so on. // Therefore all threads within a block will be processing different // vectors from the same dimension. As a result we want the total // number of blocks to be a multiple of the number of dimensions. dimGrid.y = n_dimensions; // If the number of dimensions is large then we will set the number // of blocks to equal the number of dimensions (i.e. dimGrid.x = 1) // but if the number of dimensions is small (e.g. less than four per // multiprocessor) then we'll partition the vectors across blocks // (as well as threads). if (n_dimensions < (4 * prop.multiProcessorCount)) { dimGrid.x = 4 * prop.multiProcessorCount; } else { dimGrid.x = 1; } // Cap the dimGrid.x if the number of vectors is small if (dimGrid.x > (unsigned int)(n_vectors / threadsperblock)) { dimGrid.x = (n_vectors + threadsperblock - 1) / threadsperblock; } // Round up to a power of two, required for the algorithm so that // stride is a power of two. unsigned int targetDimGridX = dimGrid.x; for (dimGrid.x = 1 ; dimGrid.x < targetDimGridX ; dimGrid.x *= 2); // Fix the number of threads dimBlock.x = threadsperblock; // Execute GPU kernel hipLaunchKernelGGL(( sobolGPU_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, n_vectors, n_dimensions, d_directions, d_output); }
979e2cb7dd885fda5c1f9832c27713fd3f57498b.cu
/* * Portions Copyright (c) 1993-2015 NVIDIA Corporation. All rights reserved. * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * * Portions Copyright (c) 2009 Mike Giles, Oxford University. All rights reserved. * Portions Copyright (c) 2008 Frances Y. Kuo and Stephen Joe. All rights reserved. * * Sobol Quasi-random Number Generator example * * Based on CUDA code submitted by Mike Giles, Oxford University, United Kingdom * http://people.maths.ox.ac.uk/~gilesm/ * * and C code developed by Stephen Joe, University of Waikato, New Zealand * and Frances Kuo, University of New South Wales, Australia * http://web.maths.unsw.edu.au/~fkuo/sobol/ * * For theoretical background see: * * P. Bratley and B.L. Fox. * Implementing Sobol's quasirandom sequence generator * http://portal.acm.org/citation.cfm?id=42288 * ACM Trans. on Math. Software, 14(1):88-100, 1988 * * S. Joe and F. Kuo. * Remark on algorithm 659: implementing Sobol's quasirandom sequence generator. * http://portal.acm.org/citation.cfm?id=641879 * ACM Trans. on Math. Software, 29(1):49-57, 2003 * */ #include "sobol.h" #include "sobol_gpu.h" #include <helper_cuda.h> #define k_2powneg32 2.3283064E-10F __global__ void sobolGPU_kernel(unsigned n_vectors, unsigned n_dimensions, unsigned *d_directions, float *d_output) { __shared__ unsigned int v[n_directions]; // Offset into the correct dimension as specified by the // block y coordinate d_directions = d_directions + n_directions * blockIdx.y; d_output = d_output + n_vectors * blockIdx.y; // Copy the direction numbers for this dimension into shared // memory - there are only 32 direction numbers so only the // first 32 (n_directions) threads need participate. if (threadIdx.x < n_directions) { v[threadIdx.x] = d_directions[threadIdx.x]; } __syncthreads(); // Set initial index (i.e. which vector this thread is // computing first) and stride (i.e. step to the next vector // for this thread) int i0 = threadIdx.x + blockIdx.x * blockDim.x; int stride = gridDim.x * blockDim.x; // Get the gray code of the index // c.f. Numerical Recipes in C, chapter 20 // http://www.nrbook.com/a/bookcpdf/c20-2.pdf unsigned int g = i0 ^ (i0 >> 1); // Initialisation for first point x[i0] // In the Bratley and Fox paper this is equation (*), where // we are computing the value for x[n] without knowing the // value of x[n-1]. unsigned int X = 0; unsigned int mask; for (unsigned int k = 0 ; k < __ffs(stride) - 1 ; k++) { // We want X ^= g_k * v[k], where g_k is one or zero. // We do this by setting a mask with all bits equal to // g_k. In reality we keep shifting g so that g_k is the // LSB of g. This way we avoid multiplication. mask = - (g & 1); X ^= mask & v[k]; g = g >> 1; } if (i0 < n_vectors) { d_output[i0] = (float)X * k_2powneg32; } // Now do rest of points, using the stride // Here we want to generate x[i] from x[i-stride] where we // don't have any of the x in between, therefore we have to // revisit the equation (**), this is easiest with an example // so assume stride is 16. 
// From x[n] to x[n+16] there will be: // 8 changes in the first bit // 4 changes in the second bit // 2 changes in the third bit // 1 change in the fourth // 1 change in one of the remaining bits // // What this means is that in the equation: // x[n+1] = x[n] ^ v[p] // x[n+2] = x[n+1] ^ v[q] = x[n] ^ v[p] ^ v[q] // ... // We will apply xor with v[1] eight times, v[2] four times, // v[3] twice, v[4] once and one other direction number once. // Since two xors cancel out, we can skip even applications // and just apply xor with v[4] (i.e. log2(16)) and with // the current applicable direction number. // Note that all these indices count from 1, so we need to // subtract 1 from them all to account for C arrays counting // from zero. unsigned int v_log2stridem1 = v[__ffs(stride) - 2]; unsigned int v_stridemask = stride - 1; for (unsigned int i = i0 + stride ; i < n_vectors ; i += stride) { // x[i] = x[i-stride] ^ v[b] ^ v[c] // where b is log2(stride) minus 1 for C array indexing // where c is the index of the rightmost zero bit in i, // not including the bottom log2(stride) bits, minus 1 // for C array indexing // In the Bratley and Fox paper this is equation (**) X ^= v_log2stridem1 ^ v[__ffs(~((i - stride) | v_stridemask)) - 1]; d_output[i] = (float)X * k_2powneg32; } } extern "C" void sobolGPU(int n_vectors, int n_dimensions, unsigned int *d_directions, float *d_output) { //const int threadsperblock = 64; //const int threadsperblock = 32; //const int threadsperblock = 128; //const int threadsperblock = 256; //const int threadsperblock = 512; const int threadsperblock = 1024; // Set up the execution configuration dim3 dimGrid; dim3 dimBlock; int device; cudaDeviceProp prop; checkCudaErrors(cudaGetDevice(&device)); checkCudaErrors(cudaGetDeviceProperties(&prop, device)); // This implementation of the generator outputs all the draws for // one dimension in a contiguous region of memory, followed by the // next dimension and so on. // Therefore all threads within a block will be processing different // vectors from the same dimension. As a result we want the total // number of blocks to be a multiple of the number of dimensions. dimGrid.y = n_dimensions; // If the number of dimensions is large then we will set the number // of blocks to equal the number of dimensions (i.e. dimGrid.x = 1) // but if the number of dimensions is small (e.g. less than four per // multiprocessor) then we'll partition the vectors across blocks // (as well as threads). if (n_dimensions < (4 * prop.multiProcessorCount)) { dimGrid.x = 4 * prop.multiProcessorCount; } else { dimGrid.x = 1; } // Cap the dimGrid.x if the number of vectors is small if (dimGrid.x > (unsigned int)(n_vectors / threadsperblock)) { dimGrid.x = (n_vectors + threadsperblock - 1) / threadsperblock; } // Round up to a power of two, required for the algorithm so that // stride is a power of two. unsigned int targetDimGridX = dimGrid.x; for (dimGrid.x = 1 ; dimGrid.x < targetDimGridX ; dimGrid.x *= 2); // Fix the number of threads dimBlock.x = threadsperblock; // Execute GPU kernel sobolGPU_kernel<<<dimGrid, dimBlock>>>(n_vectors, n_dimensions, d_directions, d_output); }
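The kernel above builds each thread's first point from the gray code of its index and then advances by its stride using equation (**). A host-only sketch of the underlying gray-code recurrence, using the trivial first Sobol dimension (direction numbers v[j] = 2^(31-j)) rather than the numbers loaded via sobol.h, shows the x[n+1] = x[n] ^ v[c] step in isolation.

// Host-only sketch of the gray-code Sobol recurrence: c is the index of the
// rightmost zero bit of n. Direction numbers here are illustrative only.
#include <cstdio>

int rightmost_zero_bit(unsigned int n) {
  int c = 0;
  while (n & 1u) { n >>= 1; ++c; }
  return c;
}

int main() {
  unsigned int v[32];
  for (int j = 0; j < 32; ++j) v[j] = 1u << (31 - j);  // first-dimension direction numbers
  unsigned int x = 0;                                  // x[0] = 0
  const float k_2powneg32 = 2.3283064E-10F;            // same scaling constant as the kernel
  for (unsigned int n = 0; n < 8; ++n) {
    printf("x[%u] = %f\n", n, (float)x * k_2powneg32);
    x ^= v[rightmost_zero_bit(n)];                     // advance to x[n+1]
  }
  return 0;
}

Running this prints 0, 0.5, 0.75, 0.25, 0.375, 0.875, 0.625, 0.125 — the first points of the standard Sobol first dimension in gray-code order.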
6488bdde26d0eb665023af51b957df4a2ab270fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "parse_oo.h" __global__ void initContext(GraphChiContext* context, int vertices, int edges) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { context->setNumIterations(0); context->setNumVertices(vertices); context->setNumEdges(edges); } } __global__ void initObject(ChiVertex<int, int>** vertex, GraphChiContext* context, int* row, int* col, int* inrow, int* incol) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { int out_start = row[tid]; int out_end; if (tid + 1 < context->getNumVertices()) { out_end = row[tid + 1]; } else { out_end = context->getNumEdges(); } int in_start = inrow[tid]; int in_end; if (tid + 1 < context->getNumVertices()) { in_end = inrow[tid + 1]; } else { in_end = context->getNumEdges(); } int indegree = in_end - in_start; int outdegree = out_end - out_start; vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree); vertex[tid]->setValue(INT_MAX); for (int i = in_start; i < in_end; i++) { vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX); } // for (int i = out_start; i < out_end; i++) { // vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f); //} } } __global__ void initOutEdge(ChiVertex<int, int>** vertex, GraphChiContext* context, int* row, int* col) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { int out_start = row[tid]; int out_end; if (tid + 1 < context->getNumVertices()) { out_end = row[tid + 1]; } else { out_end = context->getNumEdges(); } // int in_start = inrow[tid]; // int in_end; // if (tid + 1 < context->getNumVertices()) { // in_end = inrow[tid + 1]; //} else { // in_end = context->getNumEdges(); //} // int indegree = in_end - in_start; // int outdegree = out_end - out_start; // vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree); // for (int i = in_start; i < in_end; i++) { // vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f); //} for (int i = out_start; i < out_end; i++) { vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], INT_MAX); } } } __managed__ int BFS_x; __global__ void BFS(ChiVertex<int, int>** vertex, GraphChiContext* context, int iteration) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { if (iteration == 0) { if (tid == 0) { vertex[tid]->setValue(0); int numOutEdge; numOutEdge = vertex[tid]->numOutEdges(); for (int i = 0; i < numOutEdge; i++) { ChiEdge<int>* outEdge; outEdge = vertex[tid]->getOutEdge(i); outEdge->setValue(1); } } } else { int curmin; BFS_x=0; curmin = vertex[tid]->getValue(); int numInEdge; numInEdge = vertex[tid]->numInEdges(); for (int i = 0; i < numInEdge; i++) { ChiEdge<int>* inEdge; inEdge = vertex[tid]->getInEdge(i); curmin = min(curmin, inEdge->getValue()); } int vertValue; vertValue = vertex[tid]->getValue(); if (curmin < vertValue) { vertex[tid]->setValue(curmin); int numOutEdge; numOutEdge = vertex[tid]->numOutEdges(); for (int i = 0; i < numOutEdge; i++) { ChiEdge<int>* outEdge; outEdge = vertex[tid]->getOutEdge(i); int edgeValue; edgeValue = outEdge->getValue(); if (edgeValue > curmin + 1) { BFS_x= 1; outEdge->setValue(curmin + 1); } } } } } } __global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context, int* index) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { index[tid] = vertex[tid]->getValue(); } }
6488bdde26d0eb665023af51b957df4a2ab270fc.cu
#include "parse_oo.h" __global__ void initContext(GraphChiContext* context, int vertices, int edges) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid == 0) { context->setNumIterations(0); context->setNumVertices(vertices); context->setNumEdges(edges); } } __global__ void initObject(ChiVertex<int, int>** vertex, GraphChiContext* context, int* row, int* col, int* inrow, int* incol) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { int out_start = row[tid]; int out_end; if (tid + 1 < context->getNumVertices()) { out_end = row[tid + 1]; } else { out_end = context->getNumEdges(); } int in_start = inrow[tid]; int in_end; if (tid + 1 < context->getNumVertices()) { in_end = inrow[tid + 1]; } else { in_end = context->getNumEdges(); } int indegree = in_end - in_start; int outdegree = out_end - out_start; vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree); vertex[tid]->setValue(INT_MAX); for (int i = in_start; i < in_end; i++) { vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX); } // for (int i = out_start; i < out_end; i++) { // vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f); //} } } __global__ void initOutEdge(ChiVertex<int, int>** vertex, GraphChiContext* context, int* row, int* col) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { int out_start = row[tid]; int out_end; if (tid + 1 < context->getNumVertices()) { out_end = row[tid + 1]; } else { out_end = context->getNumEdges(); } // int in_start = inrow[tid]; // int in_end; // if (tid + 1 < context->getNumVertices()) { // in_end = inrow[tid + 1]; //} else { // in_end = context->getNumEdges(); //} // int indegree = in_end - in_start; // int outdegree = out_end - out_start; // vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree); // for (int i = in_start; i < in_end; i++) { // vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f); //} for (int i = out_start; i < out_end; i++) { vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], INT_MAX); } } } __managed__ int BFS_x; __global__ void BFS(ChiVertex<int, int>** vertex, GraphChiContext* context, int iteration) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { if (iteration == 0) { if (tid == 0) { vertex[tid]->setValue(0); int numOutEdge; numOutEdge = vertex[tid]->numOutEdges(); for (int i = 0; i < numOutEdge; i++) { ChiEdge<int>* outEdge; outEdge = vertex[tid]->getOutEdge(i); outEdge->setValue(1); } } } else { int curmin; BFS_x=0; curmin = vertex[tid]->getValue(); int numInEdge; numInEdge = vertex[tid]->numInEdges(); for (int i = 0; i < numInEdge; i++) { ChiEdge<int>* inEdge; inEdge = vertex[tid]->getInEdge(i); curmin = min(curmin, inEdge->getValue()); } int vertValue; vertValue = vertex[tid]->getValue(); if (curmin < vertValue) { vertex[tid]->setValue(curmin); int numOutEdge; numOutEdge = vertex[tid]->numOutEdges(); for (int i = 0; i < numOutEdge; i++) { ChiEdge<int>* outEdge; outEdge = vertex[tid]->getOutEdge(i); int edgeValue; edgeValue = outEdge->getValue(); if (edgeValue > curmin + 1) { BFS_x= 1; outEdge->setValue(curmin + 1); } } } } } } __global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context, int* index) { int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < context->getNumVertices()) { index[tid] = vertex[tid]->getValue(); } }
6a14c8a9645a2674f326b15764b2df5ff814d678.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include <time.h> unsigned int filter_radius; typedef float myDataType; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define cudaCheckError() { \ hipError_t error=hipGetLastError(); \ if(error!=hipSuccess) { \ printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(error)); \ hipDeviceReset(); \ exit(EXIT_FAILURE); \ } \ } __global__ void convolutionRowGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,int imageW, int imageH, int filterR) { int x,y,k,d; x = threadIdx.x; y = threadIdx.y; myDataType sum = 0; for(k = -filterR; k <= filterR; k++) { d = x + k; if(d >= 0 && d < imageW) { sum += d_Src[y * imageW + d] * d_Filter[filterR -k]; } } //printf("ROW X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); d_Dst[y*imageW + x] = sum; } __global__ void convolutionColumnGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter, int imageW, int imageH, int filterR) { int x,y,k,d; x = threadIdx.x; y = threadIdx.y; myDataType sum = 0; for(k = -filterR; k <= filterR; k++) { d = y + k; if(d >= 0 && d < imageH) { sum += d_Src[d * imageW + x] * d_Filter[filterR -k]; //printf("X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); } } //printf("COL X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); d_Dst[y * imageW + x] = sum; } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { myDataType sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } } //printf("ROW X:%d Y:%d SUM:%f\n\n",x,y,sum); h_Dst[y * imageW + x] = sum; } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { myDataType sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } } //printf("COL X:%d Y:%d SUM:%f\n\n",x,y,sum); h_Dst[y * imageW + x] = sum; } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { myDataType *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU, *h_OutputGPU; int imageW; int imageH; //int count=0; unsigned int i; double accuracy; double timing; clock_t start; clock_t end; printf("Enter filter radius : "); scanf(" %d", &filter_radius); // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH); scanf(" %d", &imageW); imageH = imageW; printf("Enter Accuracy:"); scanf(" %lf", &accuracy); dim3 threads(imageH,imageW); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays and device array...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... h_Filter = (myDataType *)malloc(FILTER_LENGTH * sizeof(myDataType)); h_Input = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_Buffer = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_OutputCPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_OutputGPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); if (h_Filter==NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL){ printf("Something went wrong wille malloc in CPU\n"); } printf("Memmory allocation for host arrays: COMPLETED \n"); hipMallocManaged((void**)&d_Filter,FILTER_LENGTH * sizeof(myDataType)); hipMallocManaged((void**)&d_Input,imageH * imageW * sizeof(myDataType)); hipMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(myDataType)); hipMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(myDataType)); cudaCheckError(); printf("Memmory allocation for device arrays: COMPLETED \n"); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (myDataType)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (myDataType)rand() / ((myDataType)RAND_MAX / 255) + (myDataType)rand() / (myDataType)RAND_MAX; } printf("initialization of host arrays: COMPLETED \n"); hipMemcpy(d_Filter, h_Filter,FILTER_LENGTH * sizeof(myDataType),hipMemcpyHostToDevice); hipMemcpy(d_Input, h_Input,imageH * imageW * sizeof(myDataType),hipMemcpyHostToDevice); cudaCheckError(); printf("initialization of device arrays: COMPLETED \n\n"); printf("GPU computation...\n"); hipLaunchKernelGGL(( convolutionRowGPU), dim3(1),dim3(threads), 0, 0, d_Buffer,d_Input,d_Filter,imageW,imageH,filter_radius); cudaCheckError(); hipDeviceSynchronize(); hipLaunchKernelGGL(( convolutionColumnGPU), dim3(1),dim3(threads), 0, 0, d_OutputGPU,d_Buffer,d_Filter,imageW,imageH,filter_radius); cudaCheckError(); printf("GPU computation : COMPLETED\n\n"); hipMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(myDataType),hipMemcpyDeviceToHost); // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. 
printf("CPU computation...\n"); start = clock(); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles end = clock(); timing = ((double) (end - start)) / CLOCKS_PER_SEC; printf("CPU computation : COMPLETED in time:%10.8f\n",timing); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas printf("\nCPU computations == GPU computation?\n"); for (i = 0; i < imageW * imageH; i++) { if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){ printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program after Memmory Free...\n",i,h_OutputGPU[i]-h_OutputCPU[i]); //count++; // free all the allocated memory CPU free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); // free all the allocated memory GPU hipFree(d_OutputGPU); hipFree(d_Buffer); hipFree(d_Input); hipFree(d_Filter); cudaCheckError(); hipDeviceReset(); return(1); } } printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n"); // free all the allocated memory CPU free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); // free all the allocated memory GPU hipFree(d_OutputGPU); hipFree(d_Buffer); hipFree(d_Input); hipFree(d_Filter); hipDeviceReset(); return 0; }
6a14c8a9645a2674f326b15764b2df5ff814d678.cu
/* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include <time.h> unsigned int filter_radius; typedef float myDataType; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) #define cudaCheckError() { \ cudaError_t error=cudaGetLastError(); \ if(error!=cudaSuccess) { \ printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error)); \ cudaDeviceReset(); \ exit(EXIT_FAILURE); \ } \ } __global__ void convolutionRowGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter,int imageW, int imageH, int filterR) { int x,y,k,d; x = threadIdx.x; y = threadIdx.y; myDataType sum = 0; for(k = -filterR; k <= filterR; k++) { d = x + k; if(d >= 0 && d < imageW) { sum += d_Src[y * imageW + d] * d_Filter[filterR -k]; } } //printf("ROW X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); d_Dst[y*imageW + x] = sum; } __global__ void convolutionColumnGPU(myDataType *d_Dst, myDataType *d_Src, myDataType *d_Filter, int imageW, int imageH, int filterR) { int x,y,k,d; x = threadIdx.x; y = threadIdx.y; myDataType sum = 0; for(k = -filterR; k <= filterR; k++) { d = y + k; if(d >= 0 && d < imageH) { sum += d_Src[d * imageW + x] * d_Filter[filterR -k]; //printf("X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); } } //printf("COL X:%d Y:%d SUM:%f\n\n",threadIdx.x,threadIdx.y,sum); d_Dst[y * imageW + x] = sum; } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { myDataType sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } } //printf("ROW X:%d Y:%d SUM:%f\n\n",x,y,sum); h_Dst[y * imageW + x] = sum; } } } //////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(myDataType *h_Dst, myDataType *h_Src, myDataType *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { myDataType sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } } //printf("COL X:%d Y:%d SUM:%f\n\n",x,y,sum); h_Dst[y * imageW + x] = sum; } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { myDataType *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU, *h_OutputGPU; int imageW; int imageH; //int count=0; unsigned int i; double accuracy; double timing; clock_t start; clock_t end; printf("Enter filter radius : "); scanf(" %d", &filter_radius); // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. printf("Enter image size. 
Should be a power of two and greater than %d : ", FILTER_LENGTH); scanf(" %d", &imageW); imageH = imageW; printf("Enter Accuracy:"); scanf(" %lf", &accuracy); dim3 threads(imageH,imageW); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays and device array...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... h_Filter = (myDataType *)malloc(FILTER_LENGTH * sizeof(myDataType)); h_Input = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_Buffer = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_OutputCPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); h_OutputGPU = (myDataType *)malloc(imageW * imageH * sizeof(myDataType)); if (h_Filter==NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL){ printf("Something went wrong wille malloc in CPU\n"); } printf("Memmory allocation for host arrays: COMPLETED \n"); cudaMallocManaged((void**)&d_Filter,FILTER_LENGTH * sizeof(myDataType)); cudaMallocManaged((void**)&d_Input,imageH * imageW * sizeof(myDataType)); cudaMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(myDataType)); cudaMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(myDataType)); cudaCheckError(); printf("Memmory allocation for device arrays: COMPLETED \n"); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (myDataType)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (myDataType)rand() / ((myDataType)RAND_MAX / 255) + (myDataType)rand() / (myDataType)RAND_MAX; } printf("initialization of host arrays: COMPLETED \n"); cudaMemcpy(d_Filter, h_Filter,FILTER_LENGTH * sizeof(myDataType),cudaMemcpyHostToDevice); cudaMemcpy(d_Input, h_Input,imageH * imageW * sizeof(myDataType),cudaMemcpyHostToDevice); cudaCheckError(); printf("initialization of device arrays: COMPLETED \n\n"); printf("GPU computation...\n"); convolutionRowGPU<<<1,threads>>>(d_Buffer,d_Input,d_Filter,imageW,imageH,filter_radius); cudaCheckError(); cudaDeviceSynchronize(); convolutionColumnGPU<<<1,threads>>>(d_OutputGPU,d_Buffer,d_Filter,imageW,imageH,filter_radius); cudaCheckError(); printf("GPU computation : COMPLETED\n\n"); cudaMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(myDataType),cudaMemcpyDeviceToHost); // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. 
printf("CPU computation...\n"); start = clock(); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles end = clock(); timing = ((double) (end - start)) / CLOCKS_PER_SEC; printf("CPU computation : COMPLETED in time:%10.8f\n",timing); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas printf("\nCPU computations == GPU computation?\n"); for (i = 0; i < imageW * imageH; i++) { if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){ printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program after Memmory Free...\n",i,h_OutputGPU[i]-h_OutputCPU[i]); //count++; // free all the allocated memory CPU free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); // free all the allocated memory GPU cudaFree(d_OutputGPU); cudaFree(d_Buffer); cudaFree(d_Input); cudaFree(d_Filter); cudaCheckError(); cudaDeviceReset(); return(1); } } printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n"); // free all the allocated memory CPU free(h_OutputCPU); free(h_OutputGPU); free(h_Buffer); free(h_Input); free(h_Filter); // free all the allocated memory GPU cudaFree(d_OutputGPU); cudaFree(d_Buffer); cudaFree(d_Input); cudaFree(d_Filter); cudaDeviceReset(); return 0; }
ec39490a4fd8057783fa44db291df7c518398eee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void slice(float* ptrGMResultat, int nbSlice); __device__ float fonctionPi(float x); __device__ void reductionIntraThread(float* tabSM, int nbSlice); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! */ __global__ void slice(float* ptrGMResultat, int nbSlice) { extern __shared__ float tabSM[]; reductionIntraThread(tabSM, nbSlice); __syncthreads(); reductionADD<float>(tabSM, ptrGMResultat); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ float fonctionPi(float x) { return 4.f / (1.f + x * x); } __device__ void reductionIntraThread(float* tabSM, int nbSlice) { const int NB_THREAD=Indice1D::nbThread(); const int TID=Indice2D::tid(); const int TID_LOCAL=Indice2D::tidLocal(); const float DX = 1.f / (float) nbSlice; int s = TID; float sumLocal = 0; while (s<nbSlice) { sumLocal+=fonctionPi(DX*s); s+= NB_THREAD; } tabSM[TID_LOCAL] = sumLocal*DX; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
ec39490a4fd8057783fa44db291df7c518398eee.cu
#include "Indice2D.h" #include "Indice1D.h" #include "cudaTools.h" #include "reductionADD.h" #include <stdio.h> /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void slice(float* ptrGMResultat, int nbSlice); __device__ float fonctionPi(float x); __device__ void reductionIntraThread(float* tabSM, int nbSlice); /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /** * output : void required !! */ __global__ void slice(float* ptrGMResultat, int nbSlice) { extern __shared__ float tabSM[]; reductionIntraThread(tabSM, nbSlice); __syncthreads(); reductionADD<float>(tabSM, ptrGMResultat); } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ __device__ float fonctionPi(float x) { return 4.f / (1.f + x * x); } __device__ void reductionIntraThread(float* tabSM, int nbSlice) { const int NB_THREAD=Indice1D::nbThread(); const int TID=Indice2D::tid(); const int TID_LOCAL=Indice2D::tidLocal(); const float DX = 1.f / (float) nbSlice; int s = TID; float sumLocal = 0; while (s<nbSlice) { sumLocal+=fonctionPi(DX*s); s+= NB_THREAD; } tabSM[TID_LOCAL] = sumLocal*DX; } /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
733855414154bdf7ea701ac10465b1fe317ed4b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ const char* str; \ hipGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ std::cout << "(CUDART) returned " << hipGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int 
i=0; i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I/usr/local/cuda/include"}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); 
CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I/usr/local/cuda/include"}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program(constmem_program_source, 0, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); 
CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 
13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(hipMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( hipMemcpy(&h_out[0], d_out, n * sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v1 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v1 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v2("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v2("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v2("bad_program\nNOT CUDA C!"), std::runtime_error); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, "-std=c++98"}); auto program_v2 = jitify::experimental::Program( thrust_program_source, {}, {"-I" CUDA_INC_DIR, "-std=c++98"}); } static const char* const cub_program_source = "cub_program\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef hipcub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(hipFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { hipFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(hipFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <hiprand/hiprand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call hiprand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { hipFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(hipFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { hipFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(hipFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { hipFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } // NOTE: Keep this as the last test in the file, in case the env var is sticky. 
TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); }
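The .hip file above is the hipify translation of the .cu file that follows: in the error-check macros, CUresult/CUDA_SUCCESS/cuGetErrorName become hipError_t/hipSuccess/hipGetErrorName and cudaError_t/cudaGetErrorString become hipError_t/hipGetErrorString; the cuda* runtime calls (cudaMalloc, cudaMemcpy, cudaFree, ...) become their hip* counterparts; and the curand/cub headers are mapped to hiprand/hipcub includes. Below is a minimal sketch of the CHECK_CUDART-style error-check pattern both variants rely on, written in CUDA form; the macro name CHECK is hypothetical, and the comments note the HIP spellings exactly as they appear in the translated file.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Hypothetical CHECK macro following the CHECK_CUDART pattern in the tests.
// Under HIP the same shape uses hipError_t / hipSuccess / hipGetErrorString,
// which is the substitution hipify performs in the file above.
#define CHECK(call)                                                         \
  do {                                                                      \
    cudaError_t status = (call);                                            \
    if (status != cudaSuccess) {                                            \
      std::fprintf(stderr, "(CUDART) %s at %s:%d\n",                        \
                   cudaGetErrorString(status), __FILE__, __LINE__);         \
      std::exit(EXIT_FAILURE);                                              \
    }                                                                       \
  } while (0)

int main()
{
    int* d_ptr = NULL;
    CHECK(cudaMalloc((void**)&d_ptr, 16 * sizeof(int)));  // hipMalloc under HIP
    CHECK(cudaMemset(d_ptr, 0, 16 * sizeof(int)));        // hipMemset under HIP
    CHECK(cudaFree(d_ptr));                               // hipFree under HIP
    std::puts("ok");
    return 0;
}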
733855414154bdf7ea701ac10465b1fe317ed4b3.cu
/* * Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ CUresult status = call; \ if (status != CUDA_SUCCESS) { \ const char* str; \ cuGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, CUDA_SUCCESS); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ cudaError_t status = call; \ if (status != cudaSuccess) { \ std::cout << "(CUDART) returned " << cudaGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, cudaSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int i=0; i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, 
Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I/usr/local/cuda/include"}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() 
.configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I/usr/local/cuda/include"}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program(constmem_program_source, 0, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); 
CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I/usr/local/cuda/include"}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = 
{3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(cudaMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( cudaMemcpy(&h_out[0], d_out, n * sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v1 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v1 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v2("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v2("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v2("bad_program\nNOT CUDA C!"), std::runtime_error); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, "-std=c++98"}); auto program_v2 = jitify::experimental::Program( thrust_program_source, {}, {"-I" CUDA_INC_DIR, "-std=c++98"}); } static const char* const cub_program_source = "cub_program\n" "#include <cub/block/block_load.cuh>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <cub/block/block_reduce.cuh>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef cub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(cudaFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { cudaFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(cudaFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <curand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call curand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { cudaFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(cudaFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { cudaFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(cudaFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { cudaFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } // NOTE: Keep this as the last test in the file, in case the env var is sticky. 
TEST(JitifyTest, EnvVarOptions) {
  setenv("JITIFY_OPTIONS", "-bad_option", true);
  EXPECT_THROW(jitify::JitCache kernel_cache;
               auto program = kernel_cache.program(simple_program_source),
               std::runtime_error);
  EXPECT_THROW(jitify::experimental::Program program(simple_program_source),
               std::runtime_error);
  setenv("JITIFY_OPTIONS", "", true);
}
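// Sketch: the EnvVarOptions test above mutates the process-wide
// JITIFY_OPTIONS variable, which is why the note before it asks to keep it
// last. A minimal RAII guard would make test ordering irrelevant. This is a
// hedged sketch, not part of jitify or the original test file: it assumes
// POSIX getenv/setenv/unsetenv (already used by the test above) plus
// <string>, and the name ScopedEnvVar is hypothetical.
#include <cstdlib>
#include <string>
class ScopedEnvVar {
 public:
  ScopedEnvVar(const char* name, const char* value) : name_(name) {
    if (const char* old = std::getenv(name)) {
      old_ = old;
      had_old_ = true;
    }
    setenv(name, value, /*overwrite=*/1);
  }
  ~ScopedEnvVar() {
    // Restore the previous value, or remove the variable, on scope exit.
    if (had_old_) {
      setenv(name_, old_.c_str(), 1);
    } else {
      unsetenv(name_);
    }
  }
 private:
  const char* name_;
  std::string old_;
  bool had_old_ = false;
};
// Usage sketch: { ScopedEnvVar guard("JITIFY_OPTIONS", "-bad_option"); ... }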
5e26f2bc9c2bc51e50ece6a1adbb430f6d56193d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include "caffe2/core/operator.h" #include "caffe2/operators/segment_reduction_op.h" #include "caffe2/operators/segment_reduction_op_gpu.cuh" #include "caffe2/utils/math.h" namespace caffe2 { namespace { void inclusive_scan_wrapper( const int* length_data, int len_length, Tensor* temp_buffer, Tensor* prefix_sum_out, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; hipcub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), len_length, context_->cuda_stream()); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); // Run inclusive prefix sum hipcub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), len_length, context_->cuda_stream()); } template <typename T, bool ExactBlock = false, bool Average = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_sum_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { in += threadIdx.x; T sum = (T)0; for (int line = start; line < end; ++line) { sum += in[line * post]; } if (Average && (end - start) > 1) { sum /= (end - start); } out[group * post + threadIdx.x] = sum; } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T sum = (T)0; for (int line = start; line < end; ++line) { sum += in[line * post + i]; } if (Average && (end - start) > 1) { sum /= (end - start); } out[group * post + i] = sum; } } } template <typename T, bool ExactBlock = false, bool Average = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_sum_gradient_kernel( const T* __restrict__ grad_in, T* __restrict__ grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { grad_out[line * post] = grad_in[group * post]; if (Average && (end - start) > 1) { grad_out[line * post] /= (end - start); } } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { grad_out[line * post + i] = grad_in[group * post + i]; if (Average && (end - start) > 1) { grad_out[line * post + i] /= (end - start); } } } } } template <typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_max_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length, const T numeric_min) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { in += threadIdx.x; T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[line * post]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + threadIdx.x] = max; } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[line * post + i]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + i] = max; } } } template <typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_weighted_sum_gradient_kernel( const T* __restrict__ grad_in, const T* __restrict__ weights_in, T* __restrict__ grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { grad_out[line * post] = weights_in[line] * grad_in[group * post]; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { grad_out[line * post + i] = weights_in[line] * grad_in[group * post + i]; } } } } template <typename T, typename IndexType, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_weighted_sum_with_main_input_gradient_kernel( const T* __restrict__ grad_in, const T* __restrict__ weights_in, const T* __restrict__ data_in, const IndexType* __restrict__ indices, T* __restrict__ data_grad_out, T* __restrict__ weights_grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // todo figure this num threads thing typedef hipcub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; // TODO(wyiming): parallelize this outter loop for (int line = start; line < end; ++line) { T w_grad = 0; for (int i = threadIdx.x; i < post; i += blockDim.x) { auto g_in = grad_in[group * post + i]; data_grad_out[line * post + i] = weights_in[line] * g_in; w_grad += g_in * data_in[indices[line] * post + i]; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, hipcub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } template <typename T, typename IndexType, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_length_max_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, const IndexType* __restrict__ indices, int N, int post, int len_length, int len_indices, const T numeric_min) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= len_indices); CUDA_KERNEL_ASSERT(end <= len_indices); extern __shared__ T reduceVals[]; if (ExactBlock) { T max = numeric_min; in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { T in_data = in[indices[line] * post]; max = max > in_data ? max : in_data; } reduceVals[threadIdx.y * blockDim.x + threadIdx.x] = max; __syncthreads(); if (threadIdx.y == 0) { max = numeric_min; for (int i = 0; i < blockDim.y; ++i) { T in_data = reduceVals[i * blockDim.x + threadIdx.x]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + threadIdx.x] = max; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[indices[line] * post + i]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + i] = max; } } } template <typename T, typename IndexType, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_length_weighted_sum_kernel( const T* __restrict__ in, const T* __restrict__ in_weights, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, const IndexType* __restrict__ indices, int N, int post, int len_length, int len_indices) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= len_indices); CUDA_KERNEL_ASSERT(end <= len_indices); extern __shared__ T reduceVals[]; if (ExactBlock) { T sum = (T)0; in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { sum += in_weights[line] * in[indices[line] * post]; } reduceVals[threadIdx.y * blockDim.x + threadIdx.x] = sum; __syncthreads(); if (threadIdx.y == 0) { sum = (T)0; for (int i = 0; i < blockDim.y; ++i) { sum += reduceVals[i * blockDim.x + threadIdx.x]; } out[group * post + threadIdx.x] = sum; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T sum = (T)0; for (int line = start; line < end; ++line) { sum += in_weights[line] * in[indices[line] * post + i]; } out[group * post + i] = sum; } } } } // namespace template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsSumOp(Args&&... args) : Operator<CUDAContext>(std::forward<Args>(args)...) 
{} ~CUDASparseLengthsSumOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { if (SparseFused) { return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(DATA)); } else { return DoRunWithType2<IndexType, T>(); } } template <typename IndexType, typename InType> bool DoRunWithType2() { auto& dataInput = Input(DATA); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (SparseFused) { const InType* in_data = dataInput.template data<InType>(); if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; // calling cuda kernel with ExactBlock = true, Average = false hipLaunchKernelGGL(( sparse_length_sum_kernel<InType, T, IndexType, true, false>) , dim3(len_length), dim3(block), smem, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { // calling cuda kernel with ExactBlock = false, Average = false hipLaunchKernelGGL(( sparse_length_sum_kernel<InType, T, IndexType, false, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } } else { const T* in_data = dataInput.template data<T>(); if (post <= maxThreads) { hipLaunchKernelGGL(( length_sum_kernel<T, true, false>) , dim3(len_length), dim3(post), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { hipLaunchKernelGGL(( length_sum_kernel<T, true, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } } return true; } enum { DATA = 0, INDICES = 1, LENGTHS = 1 + (SparseFused ? 
1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsMeanOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsMeanOp(Args&&... args) : Operator<CUDAContext>(std::forward<Args>(args)...) {} ~CUDASparseLengthsMeanOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { if (SparseFused) { return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(DATA)); } else { return DoRunWithType2<IndexType, T>(); } } template <typename IndexType, typename InType> bool DoRunWithType2() { auto& dataInput = Input(DATA); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (SparseFused) { const InType* in_data = dataInput.template data<InType>(); if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; // calling cuda kernel with ExactBlock = true, Average = true hipLaunchKernelGGL(( sparse_length_sum_kernel<InType, T, IndexType, true, true>) , dim3(len_length), dim3(block), smem, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { // calling cuda kernel with ExactBlock = false, Average = true hipLaunchKernelGGL(( sparse_length_sum_kernel<InType, T, IndexType, false, true>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } } else { const T* in_data = dataInput.template data<T>(); if (post <= maxThreads) { // calling cuda kernel with ExactBlock = true, Average = true hipLaunchKernelGGL(( length_sum_kernel<T, true, true>) , dim3(len_length), dim3(post), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = true, Average = true 
hipLaunchKernelGGL(( length_sum_kernel<T, true, true>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } } return true; } enum { DATA = 0, INDICES = 1, LENGTHS = 1 + (SparseFused ? 1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsMaxOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsMaxOp(Args&&... args) : Operator<CUDAContext>(std::forward<Args>(args)...) {} ~CUDASparseLengthsMaxOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { auto& dataInput = Input(0); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); const T* in_data = dataInput.template data<T>(); T* out_data = output->template mutable_data<T>(); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; T numeric_min = std::numeric_limits<T>::min(); if (SparseFused) { if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; hipLaunchKernelGGL(( sparse_length_max_kernel<T, IndexType, true>) , dim3(len_length), dim3(block), smem, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize, numeric_min); } else { hipLaunchKernelGGL(( sparse_length_max_kernel<T, IndexType, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize, numeric_min); } } else { if (post <= maxThreads) { hipLaunchKernelGGL(( length_max_kernel<T, true>) , dim3(len_length), dim3(post), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length, numeric_min); } else { hipLaunchKernelGGL(( length_max_kernel<T, true>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length, numeric_min); } } return true; } enum { INDICES = 1, LENGTHS 
= 1 + (SparseFused ? 1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsWeightedSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsWeightedSumOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsWeightedSumOp() {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto& dataInput = Input(DATA); auto& weightsInput = Input(WEIGHTS); auto& indicesInput = Input(INDICES); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it const int64_t dataToReduceSize = indicesInput.dim(0); const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); const IndexType* indices = indicesInput.template data<IndexType>(); const T* in_data = dataInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; hipLaunchKernelGGL(( sparse_length_weighted_sum_kernel<T, IndexType, true>) , dim3(len_length), dim3(block), smem, context_.cuda_stream(), in_data, in_weights, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { hipLaunchKernelGGL(( sparse_length_weighted_sum_kernel<T, IndexType, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, in_weights, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } return true; } enum { DATA = 0, WEIGHTS = 1, INDICES = 2, LENGTHS = 3 }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename SIndex> __global__ void MaxSegmentKernel(int n, const SIndex* segment_ids, SIndex* max_segment) { typedef hipcub::BlockReduce<SIndex, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int mx = 0; for (int j = threadIdx.x; j < n; j += blockDim.x) { mx = segment_ids[j] > mx ? 
segment_ids[j] : mx; } SIndex max_seg = BlockReduce(temp_storage).Reduce(mx, hipcub::Max()); if (threadIdx.x == 0) { *max_segment = max_seg; } } template <typename SIndex, typename T> __global__ void UnsortedSegmentSumKernel( int n, int slize_sz, const SIndex* segments, const T* data, T* out, int* scales) { CUDA_1D_KERNEL_LOOP(i, n) { int slice_idx = i / slize_sz; int j = i % slize_sz; SIndex segment = segments[slice_idx]; atomicAdd(&out[segment * slize_sz + j], data[i]); if (scales && j == 0) { atomicAdd(&scales[segment], 1); } } } template <typename SIndex, typename T> __global__ void SegmentScalingKernel(int m, int slize_sz, const int* scales, T* out) { CUDA_1D_KERNEL_LOOP(i, m) { int scale = scales[i / slize_sz]; out[i] = scale > 0 ? out[i] / scale : 0.0; // avoid 0/0 division } } template <typename T, typename SIndex, bool mean> class CUDAUnsortedSegmentSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_FUNCTIONS(CUDAContext); CUDAUnsortedSegmentSumOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDAUnsortedSegmentSumOp() {} bool RunOnDevice() override { auto& data = Input(0); auto& segment_ids = Input(1); if (segment_ids.numel() == 0 || data.numel() == 0) { // Special handling for empty input auto dims = data.sizes().vec(); if (dims.size() > 0) { dims[0] = 0; } Output(0, dims, at::dtype<T>()); return true; } CAFFE_ENFORCE_EQ(1, segment_ids.dim(), "SEGMENT_IDS must be a vector"); int64_t slize_sz = data.size_from_dim(1); ReinitializeTensor(&K_tensor_, {1}, at::dtype<SIndex>().device(CUDA)); // Get maximum segment id so we can size the output. // This must be done synchronously with host. if (segment_ids.numel() > 4096) { // when the input size is large, device reduce is better. size_t tmp_storage_bytes = 0; // the first call to `Max` do nothing, but set correct tmp_storage_bytes. hipcub::DeviceReduce::Max( nullptr, tmp_storage_bytes, segment_ids.template data<SIndex>(), // input device data K_tensor_.template mutable_data<SIndex>(), // output device data segment_ids.numel(), // number of items context_.cuda_stream()); // the second call do the real computation. 
ReinitializeTensor( &buffer_tensor_, {static_cast<int64_t>(tmp_storage_bytes)}, at::dtype<char>().device(CUDA)); hipcub::DeviceReduce::Max( static_cast<void*>(buffer_tensor_.mutable_data<char>()), tmp_storage_bytes, segment_ids.template data<SIndex>(), // input device data K_tensor_.template mutable_data<SIndex>(), // output device data segment_ids.numel(), // number of items context_.cuda_stream()); } else { hipLaunchKernelGGL(( MaxSegmentKernel<SIndex>) , dim3(1), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), segment_ids.numel(), segment_ids.template data<SIndex>(), K_tensor_.mutable_data<SIndex>()); } SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), K_tensor_.template data<SIndex>(), &K); context_.FinishDeviceComputation(); auto dims = data.sizes().vec(); dims[0] = K + 1; auto* output = Output(0, dims, at::dtype<T>()); // Clear the output as we will be accumulating the values math::Set<T, CUDAContext>( output->numel(), T(0), output->template mutable_data<T>(), &context_); if (!mean) { hipLaunchKernelGGL(( UnsortedSegmentSumKernel<SIndex, T>) , dim3(CAFFE_GET_BLOCKS(data.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), data.numel(), slize_sz, segment_ids.template data<SIndex>(), data.template data<T>(), output->template mutable_data<T>(), nullptr); } else { // For mean, we need to compute scaling factors ReinitializeTensor( &scaling_factors_, {K + 1}, at::dtype<int>().device(CUDA)); math::Set<int, CUDAContext>( scaling_factors_.numel(), int(0), scaling_factors_.template mutable_data<int>(), &context_); hipLaunchKernelGGL(( UnsortedSegmentSumKernel<SIndex, T>) , dim3(CAFFE_GET_BLOCKS(data.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), data.numel(), slize_sz, segment_ids.template data<SIndex>(), data.template data<T>(), output->template mutable_data<T>(), scaling_factors_.template mutable_data<int>()); // Divide by the scaling factors to get means hipLaunchKernelGGL(( SegmentScalingKernel<SIndex, T>) , dim3(CAFFE_GET_BLOCKS(output->numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output->numel(), slize_sz, scaling_factors_.template data<int>(), output->template mutable_data<T>()); } return true; } private: Tensor buffer_tensor_; Tensor K_tensor_; Tensor scaling_factors_; // for mean }; template <typename SIndex> __global__ void segment_lengths_kernel(int N, const SIndex* X, SIndex* Y) { CUDA_1D_KERNEL_LOOP(i, N) { atomicAdd(&Y[X[i]], 1); } } template <typename T, typename SIndex, bool LOGEXP = false> __global__ void sorted_segment_mean_kernel( const SIndex K, const int N, const SIndex* S, const SIndex* I, const T* X, T* Y) { for (int sId = blockIdx.x; sId < K; sId += gridDim.x) { const int start_index = sId > 0 ? S[sId] * N : 0; const int y_start_index = sId * N; for (int i = threadIdx.x; i < N; i += blockDim.x) { T sum = 0.0; for (int j = 0; j < I[sId]; ++j) { const T x_i_j = X[start_index + j * N + i]; sum += LOGEXP ? exp(x_i_j) : x_i_j; } const T norm_sum = sum / I[sId]; Y[y_start_index + i] = LOGEXP ? 
log(norm_sum) : norm_sum; } } } template <typename T, typename SIndex, bool LOGEXP, class Context = CUDAContext> class SortedSegmentRangeMeanOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; SortedSegmentRangeMeanOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~SortedSegmentRangeMeanOp() {} bool RunOnDevice() override { const auto& input = Input(0); const auto& indices = Input(1); int M = input.dim32(0); int N = input.size_from_dim(1); auto* output = Output(0); auto dims = input.sizes().vec(); SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), indices.template data<SIndex>() + indices.size() - 1, &K); context_.FinishDeviceComputation(); K += 1; dims[0] = K; if (segment_len_.size() != K) { segment_len_.Resize(K); segment_len_prefix_sum_.Resize(K); } output->Resize(dims); math::Set<SIndex, CUDAContext>( segment_len_.size(), 0, segment_len_.template mutable_data<SIndex>(), &context_); hipLaunchKernelGGL(( segment_lengths_kernel), dim3(CAFFE_GET_BLOCKS(indices.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), indices.size(), indices.template data<SIndex>(), segment_len_.template mutable_data<SIndex>()); size_t temp_storage_bytes = 0; hipcub::DeviceScan::ExclusiveSum( nullptr, temp_storage_bytes, segment_len_.template data<SIndex>(), segment_len_prefix_sum_.template mutable_data<SIndex>(), K, context_.cuda_stream()); auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T); prefix_buffer_.Resize(buffer_size); void* dev_temp_storage = static_cast<void*>(prefix_buffer_.mutable_data<T>()); hipcub::DeviceScan::ExclusiveSum( dev_temp_storage, temp_storage_bytes, segment_len_.template data<SIndex>(), segment_len_prefix_sum_.template mutable_data<SIndex>(), K, context_.cuda_stream()); hipLaunchKernelGGL(( sorted_segment_mean_kernel<T, SIndex, LOGEXP>) , dim3(::min(K, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, N, segment_len_prefix_sum_.template data<SIndex>(), segment_len_.template data<SIndex>(), input.template data<T>(), output->template mutable_data<T>()); return true; } private: Tensor segment_len_{CUDA}; // for mean Tensor segment_len_prefix_sum_{CUDA}; Tensor prefix_buffer_{CUDA}; }; template <typename T, typename SIndex, bool LOGEXP = false> __global__ void sorted_segment_mean_gradient_kernel( const int M, const int N, const T* X, const T* Y, const T* dY, const SIndex* I, const SIndex* S, T* dX) { CUDA_1D_KERNEL_LOOP(i, M * N) { const int sId = I[i / N]; const int sSize = S[sId]; const int yId = N * sId + i % N; dX[i] = LOGEXP ? 
dY[yId] * exp(X[i] - Y[yId]) / sSize : dY[yId] / sSize; } } template <typename T, typename SIndex, bool LOGEXP, class Context = CUDAContext> class SortedSegmentRangeMeanGradientOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; SortedSegmentRangeMeanGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~SortedSegmentRangeMeanGradientOp() {} bool RunOnDevice() override { const auto& X = Input(0); const auto& Y = Input(1); const auto& dY = Input(2); const auto& I = Input(3); auto* dX = Output(0, X.sizes(), at::dtype<T>()); const int M = X.dim32(0); const int N = X.size_from_dim(1); SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), I.template data<SIndex>() + I.numel() - 1, &K); K += 1; if (segment_len_.numel() != K) { ReinitializeTensor(&segment_len_, {K}, at::dtype<SIndex>().device(CUDA)); } math::Set<SIndex, CUDAContext>( segment_len_.numel(), 0, segment_len_.template mutable_data<SIndex>(), &context_); hipLaunchKernelGGL(( segment_lengths_kernel), dim3(CAFFE_GET_BLOCKS(I.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), I.numel(), I.template data<SIndex>(), segment_len_.template mutable_data<SIndex>()); hipLaunchKernelGGL(( sorted_segment_mean_gradient_kernel<T, SIndex, LOGEXP>) , dim3(CAFFE_GET_BLOCKS(dX->numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), M, N, X.template data<T>(), Y.template data<T>(), dY.template data<T>(), I.template data<SIndex>(), segment_len_.template data<SIndex>(), dX->template mutable_data<T>()); return true; } private: Tensor segment_len_; // for mean }; REGISTER_CUDA_OPERATOR_STR( "LengthsSum", CUDASparseLengthsSumOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsSum", CUDASparseLengthsSumOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "LengthsMean", CUDASparseLengthsMeanOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsMean", CUDASparseLengthsMeanOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "LengthsMax", CUDASparseLengthsMaxOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsMax", CUDASparseLengthsMaxOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsWeightedSum", CUDASparseLengthsWeightedSumOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "UnsortedSegmentSum", CUDAUnsortedSegmentSumOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "UnsortedSegmentMean", CUDAUnsortedSegmentSumOp<float, int, true>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeMean", SortedSegmentRangeMeanOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeLogMeanExp", SortedSegmentRangeMeanOp<float, int, true>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeMeanGradient", SortedSegmentRangeMeanGradientOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeLogMeanExpGradient", SortedSegmentRangeMeanGradientOp<float, int, true>); template <typename T, class Context = CUDAContext> class CUDASparseLengthsSumGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsSumGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsSumGradientWithIndicesOp() {} bool RunOnDevice() override { auto& segmentGradsInput = Input(0); auto& lengthsInput = Input(1); auto& indicesInput = Input(2); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int len_length = lengthsInput.dim(0); 
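// Note: the data gradient produced by this op has one row per index;
// length_sum_gradient_kernel below broadcasts each segment's upstream
// gradient row to every row that belongs to that segment (the Average=true
// instantiation used by the mean gradient additionally divides by the
// segment length).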
CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); // calling cuda kernel with ExactBlock = true, Average = false hipLaunchKernelGGL(( length_sum_gradient_kernel<T, true, false>) , dim3(len_length), dim3(block), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = false, Average = false hipLaunchKernelGGL(( length_sum_gradient_kernel<T, false, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsMeanGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsMeanGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsMeanGradientWithIndicesOp() {} bool RunOnDevice() override { auto& segmentGradsInput = Input(0); auto& lengthsInput = Input(1); auto& indicesInput = Input(2); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); // calling cuda kernel with ExactBlock = true, Average = true hipLaunchKernelGGL(( length_sum_gradient_kernel<T, true, true>) , dim3(len_length), dim3(block), 0, context_.cuda_stream(), in_data, 
out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = false, Average = true hipLaunchKernelGGL(( length_sum_gradient_kernel<T, false, true>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsWeightedSumGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsWeightedSumGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsWeightedSumGradientWithIndicesOp() {} bool RunOnDevice() override { auto& weightsInput = Input(0); auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& indicesInput = Input(3); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post < maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); hipLaunchKernelGGL(( length_weighted_sum_gradient_kernel<T, true>) , dim3(len_length), dim3(block), 0, context_.cuda_stream(), in_data, in_weights, out_data, prefix_sum_length_data, N, post, len_length); } else { hipLaunchKernelGGL(( length_weighted_sum_gradient_kernel<T, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, in_weights, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, bool ExactBlock = false> __global__ void length_max_gradient_kernel( const T* __restrict__ grad_in, T* __restrict__ grad_out, const T* data_in, const T* data_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; data_in += threadIdx.x; data_out += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { if (data_in[line * post] == data_out[group * post]) { grad_out[line * post] = grad_in[group * post]; } else { grad_out[line * post] = 0; } } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { if (data_in[line * post + i] == data_out[group * post + i]) { grad_out[line * post + i] = grad_in[group * post + i]; } else { grad_out[line * post + i] = 0; } } } } } template <typename T, class Context = CUDAContext> class CUDALengthsMaxWithMainInputAndForwardOutputGradientOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDALengthsMaxWithMainInputAndForwardOutputGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDALengthsMaxWithMainInputAndForwardOutputGradientOp() {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, float>>::call(this, Input(3)); } template <typename IndexType> bool DoRunWithType() { auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& dataInput = Input(3); auto& dataOutput = Input(0); // based on CPU version CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); auto shape = dataInput.sizes().vec(); auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); const T* in_data = segmentGradsInput.template data<T>(); T* out_data = dataGradsOutput->template mutable_data<T>(); int N = dataInput.dim(0); int post = segmentGradsInput.size_from_dim(1); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = ::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); hipLaunchKernelGGL(( length_max_gradient_kernel<T, true>) , dim3(len_length), dim3(block), 0, context_.cuda_stream(), in_data, out_data, dataInput.template data<T>(), dataOutput.template data<T>(), prefix_sum_length_data, N, post, len_length); } else { hipLaunchKernelGGL(( length_max_gradient_kernel<T, false>) , dim3(len_length), dim3(maxThreads), 0, context_.cuda_stream(), in_data, out_data, dataInput.template data<T>(), dataOutput.template data<T>(), prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} 
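// Note: this gradient op writes two outputs. For every indexed row,
// length_weighted_sum_with_main_input_gradient_kernel scales the segment's
// upstream gradient by that row's weight to form the data gradient, and
// block-reduces the dot product of the upstream gradient with the
// corresponding input row to form the per-weight gradient.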
~CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp() {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(4)); } template <typename IndexType> bool DoRunWithType() { auto& weightsInput = Input(0); auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& dataInput = Input(3); auto& indicesInput = Input(4); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); auto* weightGradsOutput = Output(1, indicesInput.sizes(), at::dtype<T>()); T* out_data_grads = dataGradsOutput->template mutable_data<T>(); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = dataInput.template data<T>(); const T* in_grads = segmentGradsInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); const IndexType* indices = indicesInput.template data<IndexType>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); if (post > 128) { hipLaunchKernelGGL(( length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 512>) , dim3(len_length), dim3(512), 0, context_.cuda_stream(), in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else if (post > 64) { hipLaunchKernelGGL(( length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 128>) , dim3(len_length), dim3(128), 0, context_.cuda_stream(), in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else if (post > 32) { hipLaunchKernelGGL(( length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 64>) , dim3(len_length), dim3(64), 0, context_.cuda_stream(), in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else { hipLaunchKernelGGL(( length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 32>) , dim3(len_length), dim3(32), 0, context_.cuda_stream(), in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; // Needed because name is auto-generated in segment_reduction_op.cc:224 REGISTER_CUDA_OPERATOR_STR( "LengthsMaxWithMainInputAndForwardOutputGradient", CUDALengthsMaxWithMainInputAndForwardOutputGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientWeightedSumGradient, CUDASparseLengthsWeightedSumGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient, 
CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp< float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientSumGradient, CUDASparseLengthsSumGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LengthsIndicesInGradientSumGradient, CUDASparseLengthsSumGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientMeanGradient, CUDASparseLengthsMeanGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LengthsIndicesInGradientMeanGradient, CUDASparseLengthsMeanGradientWithIndicesOp<float, CUDAContext>); } // namespace caffe2 // Macro doesn't like comma using LengthsSumCUDAOp = caffe2::CUDASparseLengthsSumOp<float, caffe2::CUDAContext, false>; using LengthsMeanCUDAOp = caffe2::CUDASparseLengthsMeanOp<float, caffe2::CUDAContext, false>; using LengthsMaxCUDAOp = caffe2::CUDASparseLengthsMaxOp<float, caffe2::CUDAContext, false>; C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsSum, LengthsSumCUDAOp); C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsMean, LengthsMeanCUDAOp); C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsMax, LengthsMaxCUDAOp); #undef SEGREDUCE_MINBLOCKS
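// Sketch: a plain CPU reference for the (Sparse)LengthsSum/Mean reduction
// implemented by the kernels above, useful for checking the GPU results.
// LENGTHS is turned into a prefix sum so that segment g covers consecutive
// rows; each output row is the element-wise sum (or mean) of its segment's
// rows, optionally gathered through INDICES. The function name and signature
// below are hypothetical and not part of the operator API.
static void lengths_sum_reference(
    const float* in,     // dataToReduceSize x post input rows
    const int* lengths,  // len_length segment lengths
    const int* indices,  // row indices for the sparse case, nullptr if dense
    int len_length,
    int post,
    bool average,
    float* out) {        // len_length x post output rows
  int row = 0;  // running prefix sum of LENGTHS
  for (int g = 0; g < len_length; ++g) {
    for (int j = 0; j < post; ++j) {
      out[g * post + j] = 0.0f;
    }
    for (int l = 0; l < lengths[g]; ++l, ++row) {
      const int src = indices ? indices[row] : row;
      for (int j = 0; j < post; ++j) {
        out[g * post + j] += in[src * post + j];
      }
    }
    if (average && lengths[g] > 1) {  // matches the Average=true kernels
      for (int j = 0; j < post; ++j) {
        out[g * post + j] /= lengths[g];
      }
    }
  }
}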
5e26f2bc9c2bc51e50ece6a1adbb430f6d56193d.cu
#include <algorithm> #include "caffe2/core/operator.h" #include "caffe2/operators/segment_reduction_op.h" #include "caffe2/operators/segment_reduction_op_gpu.cuh" #include "caffe2/utils/math.h" namespace caffe2 { namespace { void inclusive_scan_wrapper( const int* length_data, int len_length, Tensor* temp_buffer, Tensor* prefix_sum_out, CUDAContext* context_) { // Retrieve buffer size size_t temp_storage_bytes = 0; cub::DeviceScan::InclusiveSum( NULL, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), len_length, context_->cuda_stream()); // Allocate temporary storage auto buffer_size = (temp_storage_bytes + sizeof(int)) / sizeof(int); temp_buffer->Resize(buffer_size); void* d_temp_storage = static_cast<void*>(temp_buffer->template mutable_data<int>()); // Run inclusive prefix sum cub::DeviceScan::InclusiveSum( d_temp_storage, temp_storage_bytes, length_data, prefix_sum_out->template mutable_data<int>(), len_length, context_->cuda_stream()); } template <typename T, bool ExactBlock = false, bool Average = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_sum_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { in += threadIdx.x; T sum = (T)0; for (int line = start; line < end; ++line) { sum += in[line * post]; } if (Average && (end - start) > 1) { sum /= (end - start); } out[group * post + threadIdx.x] = sum; } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T sum = (T)0; for (int line = start; line < end; ++line) { sum += in[line * post + i]; } if (Average && (end - start) > 1) { sum /= (end - start); } out[group * post + i] = sum; } } } template <typename T, bool ExactBlock = false, bool Average = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_sum_gradient_kernel( const T* __restrict__ grad_in, T* __restrict__ grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { grad_out[line * post] = grad_in[group * post]; if (Average && (end - start) > 1) { grad_out[line * post] /= (end - start); } } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { grad_out[line * post + i] = grad_in[group * post + i]; if (Average && (end - start) > 1) { grad_out[line * post + i] /= (end - start); } } } } } template <typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_max_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length, const T numeric_min) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { in += threadIdx.x; T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[line * post]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + threadIdx.x] = max; } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[line * post + i]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + i] = max; } } } template <typename T, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_weighted_sum_gradient_kernel( const T* __restrict__ grad_in, const T* __restrict__ weights_in, T* __restrict__ grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { grad_out[line * post] = weights_in[line] * grad_in[group * post]; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { grad_out[line * post + i] = weights_in[line] * grad_in[group * post + i]; } } } } template <typename T, typename IndexType, int NumThreads> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void length_weighted_sum_with_main_input_gradient_kernel( const T* __restrict__ grad_in, const T* __restrict__ weights_in, const T* __restrict__ data_in, const IndexType* __restrict__ indices, T* __restrict__ data_grad_out, T* __restrict__ weights_grad_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); // todo figure this num threads thing typedef cub::BlockReduce<float, NumThreads> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; // TODO(wyiming): parallelize this outter loop for (int line = start; line < end; ++line) { T w_grad = 0; for (int i = threadIdx.x; i < post; i += blockDim.x) { auto g_in = grad_in[group * post + i]; data_grad_out[line * post + i] = weights_in[line] * g_in; w_grad += g_in * data_in[indices[line] * post + i]; } w_grad = BlockReduce(temp_storage).Reduce(w_grad, cub::Sum()); if (threadIdx.x == 0) { weights_grad_out[line] = w_grad; } __syncthreads(); } } template <typename T, typename IndexType, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_length_max_kernel( const T* __restrict__ in, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, const IndexType* __restrict__ indices, int N, int post, int len_length, int len_indices, const T numeric_min) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= len_indices); CUDA_KERNEL_ASSERT(end <= len_indices); extern __shared__ T reduceVals[]; if (ExactBlock) { T max = numeric_min; in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { T in_data = in[indices[line] * post]; max = max > in_data ? max : in_data; } reduceVals[threadIdx.y * blockDim.x + threadIdx.x] = max; __syncthreads(); if (threadIdx.y == 0) { max = numeric_min; for (int i = 0; i < blockDim.y; ++i) { T in_data = reduceVals[i * blockDim.x + threadIdx.x]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + threadIdx.x] = max; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T max = numeric_min; for (int line = start; line < end; ++line) { T in_data = in[indices[line] * post + i]; max = max > in_data ? max : in_data; } // setting output to 0 to not break gradient max = max == numeric_min ? 0 : max; out[group * post + i] = max; } } } template <typename T, typename IndexType, bool ExactBlock = false> #ifdef __HIP_PLATFORM_HCC__ C10_LAUNCH_BOUNDS_2(1024, SEGREDUCE_MINBLOCKS) #endif __global__ void sparse_length_weighted_sum_kernel( const T* __restrict__ in, const T* __restrict__ in_weights, T* __restrict__ out, const int* __restrict__ prefix_sum_length_data, const IndexType* __restrict__ indices, int N, int post, int len_length, int len_indices) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= len_indices); CUDA_KERNEL_ASSERT(end <= len_indices); extern __shared__ T reduceVals[]; if (ExactBlock) { T sum = (T)0; in += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { sum += in_weights[line] * in[indices[line] * post]; } reduceVals[threadIdx.y * blockDim.x + threadIdx.x] = sum; __syncthreads(); if (threadIdx.y == 0) { sum = (T)0; for (int i = 0; i < blockDim.y; ++i) { sum += reduceVals[i * blockDim.x + threadIdx.x]; } out[group * post + threadIdx.x] = sum; } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { T sum = (T)0; for (int line = start; line < end; ++line) { sum += in_weights[line] * in[indices[line] * post + i]; } out[group * post + i] = sum; } } } } // namespace template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsSumOp(Args&&... args) : Operator<CUDAContext>(std::forward<Args>(args)...) 
{} ~CUDASparseLengthsSumOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { if (SparseFused) { return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(DATA)); } else { return DoRunWithType2<IndexType, T>(); } } template <typename IndexType, typename InType> bool DoRunWithType2() { auto& dataInput = Input(DATA); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (SparseFused) { const InType* in_data = dataInput.template data<InType>(); if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; // calling cuda kernel with ExactBlock = true, Average = false sparse_length_sum_kernel<InType, T, IndexType, true, false> <<<len_length, block, smem, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { // calling cuda kernel with ExactBlock = false, Average = false sparse_length_sum_kernel<InType, T, IndexType, false, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } } else { const T* in_data = dataInput.template data<T>(); if (post <= maxThreads) { length_sum_kernel<T, true, false> <<<len_length, post, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { length_sum_kernel<T, true, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } } return true; } enum { DATA = 0, INDICES = 1, LENGTHS = 1 + (SparseFused ? 1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsMeanOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsMeanOp(Args&&... 
args) : Operator<CUDAContext>(std::forward<Args>(args)...) {} ~CUDASparseLengthsMeanOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { if (SparseFused) { return DispatchHelper<TensorTypes2<float, at::Half>, IndexType>::call( this, Input(DATA)); } else { return DoRunWithType2<IndexType, T>(); } } template <typename IndexType, typename InType> bool DoRunWithType2() { auto& dataInput = Input(DATA); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (SparseFused) { const InType* in_data = dataInput.template data<InType>(); if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; // calling cuda kernel with ExactBlock = true, Average = true sparse_length_sum_kernel<InType, T, IndexType, true, true> <<<len_length, block, smem, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { // calling cuda kernel with ExactBlock = false, Average = true sparse_length_sum_kernel<InType, T, IndexType, false, true> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } } else { const T* in_data = dataInput.template data<T>(); if (post <= maxThreads) { // calling cuda kernel with ExactBlock = true, Average = true length_sum_kernel<T, true, true> <<<len_length, post, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = true, Average = true length_sum_kernel<T, true, true> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } } return true; } enum { DATA = 0, INDICES = 1, LENGTHS = 1 + (SparseFused ? 
1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsMaxOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; template <class... Args> explicit CUDASparseLengthsMaxOp(Args&&... args) : Operator<CUDAContext>(std::forward<Args>(args)...) {} ~CUDASparseLengthsMaxOp() {} bool RunOnDevice() override { if (SparseFused) { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } else { // type doesn't matter return DoRunWithType<int32_t>(); } } template <typename IndexType> bool DoRunWithType() { auto& dataInput = Input(0); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it int64_t dataToReduceSize; const int64_t outputSize = lengthsInput.dim(0); int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } const IndexType* indices; if (SparseFused) { // static if auto& indicesInput = Input(INDICES); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); indices = indicesInput.template data<IndexType>(); dataToReduceSize = indicesInput.dim(0); } else { dataToReduceSize = dataSize; } // only compute this the first time inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); const T* in_data = dataInput.template data<T>(); T* out_data = output->template mutable_data<T>(); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; T numeric_min = std::numeric_limits<T>::min(); if (SparseFused) { if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; sparse_length_max_kernel<T, IndexType, true> <<<len_length, block, smem, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize, numeric_min); } else { sparse_length_max_kernel<T, IndexType, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize, numeric_min); } } else { if (post <= maxThreads) { length_max_kernel<T, true> <<<len_length, post, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length, numeric_min); } else { length_max_kernel<T, true> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length, numeric_min); } } return true; } enum { INDICES = 1, LENGTHS = 1 + (SparseFused ? 
1 : 0) }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext, bool SparseFused = true> class CUDASparseLengthsWeightedSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsWeightedSumOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsWeightedSumOp() {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call( this, Input(INDICES)); } template <typename IndexType> bool DoRunWithType() { auto& dataInput = Input(DATA); auto& weightsInput = Input(WEIGHTS); auto& indicesInput = Input(INDICES); auto& lengthsInput = Input(LENGTHS); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector"); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int64_t dataSize = dataInput.dim(0); // Either first dim the data or how much we pull in indexies from it const int64_t dataToReduceSize = indicesInput.dim(0); const int64_t outputSize = lengthsInput.dim(0); const int len_length = outputSize; auto shape = dataInput.sizes().vec(); shape[0] = outputSize; auto* output = Output(0, shape, at::dtype<T>()); T* out_data = output->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); const IndexType* indices = indicesInput.template data<IndexType>(); const T* in_data = dataInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); int N = dataSize; int post = dataInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); size_t smem = sizeof(T) * post * multiple; sparse_length_weighted_sum_kernel<T, IndexType, true> <<<len_length, block, smem, context_.cuda_stream()>>>( in_data, in_weights, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } else { sparse_length_weighted_sum_kernel<T, IndexType, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, in_weights, out_data, prefix_sum_length_data, indices, N, post, len_length, dataToReduceSize); } return true; } enum { DATA = 0, WEIGHTS = 1, INDICES = 2, LENGTHS = 3 }; private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename SIndex> __global__ void MaxSegmentKernel(int n, const SIndex* segment_ids, SIndex* max_segment) { typedef cub::BlockReduce<SIndex, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; int mx = 0; for (int j = threadIdx.x; j < n; j += blockDim.x) { mx = segment_ids[j] > mx ? 
segment_ids[j] : mx; } SIndex max_seg = BlockReduce(temp_storage).Reduce(mx, cub::Max()); if (threadIdx.x == 0) { *max_segment = max_seg; } } template <typename SIndex, typename T> __global__ void UnsortedSegmentSumKernel( int n, int slize_sz, const SIndex* segments, const T* data, T* out, int* scales) { CUDA_1D_KERNEL_LOOP(i, n) { int slice_idx = i / slize_sz; int j = i % slize_sz; SIndex segment = segments[slice_idx]; atomicAdd(&out[segment * slize_sz + j], data[i]); if (scales && j == 0) { atomicAdd(&scales[segment], 1); } } } template <typename SIndex, typename T> __global__ void SegmentScalingKernel(int m, int slize_sz, const int* scales, T* out) { CUDA_1D_KERNEL_LOOP(i, m) { int scale = scales[i / slize_sz]; out[i] = scale > 0 ? out[i] / scale : 0.0; // avoid 0/0 division } } template <typename T, typename SIndex, bool mean> class CUDAUnsortedSegmentSumOp : public Operator<CUDAContext> { public: USE_OPERATOR_FUNCTIONS(CUDAContext); CUDAUnsortedSegmentSumOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDAUnsortedSegmentSumOp() {} bool RunOnDevice() override { auto& data = Input(0); auto& segment_ids = Input(1); if (segment_ids.numel() == 0 || data.numel() == 0) { // Special handling for empty input auto dims = data.sizes().vec(); if (dims.size() > 0) { dims[0] = 0; } Output(0, dims, at::dtype<T>()); return true; } CAFFE_ENFORCE_EQ(1, segment_ids.dim(), "SEGMENT_IDS must be a vector"); int64_t slize_sz = data.size_from_dim(1); ReinitializeTensor(&K_tensor_, {1}, at::dtype<SIndex>().device(CUDA)); // Get maximum segment id so we can size the output. // This must be done synchronously with host. if (segment_ids.numel() > 4096) { // when the input size is large, device reduce is better. size_t tmp_storage_bytes = 0; // the first call to `Max` do nothing, but set correct tmp_storage_bytes. cub::DeviceReduce::Max( nullptr, tmp_storage_bytes, segment_ids.template data<SIndex>(), // input device data K_tensor_.template mutable_data<SIndex>(), // output device data segment_ids.numel(), // number of items context_.cuda_stream()); // the second call do the real computation. 
ReinitializeTensor( &buffer_tensor_, {static_cast<int64_t>(tmp_storage_bytes)}, at::dtype<char>().device(CUDA)); cub::DeviceReduce::Max( static_cast<void*>(buffer_tensor_.mutable_data<char>()), tmp_storage_bytes, segment_ids.template data<SIndex>(), // input device data K_tensor_.template mutable_data<SIndex>(), // output device data segment_ids.numel(), // number of items context_.cuda_stream()); } else { MaxSegmentKernel<SIndex> <<<1, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( segment_ids.numel(), segment_ids.template data<SIndex>(), K_tensor_.mutable_data<SIndex>()); } SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), K_tensor_.template data<SIndex>(), &K); context_.FinishDeviceComputation(); auto dims = data.sizes().vec(); dims[0] = K + 1; auto* output = Output(0, dims, at::dtype<T>()); // Clear the output as we will be accumulating the values math::Set<T, CUDAContext>( output->numel(), T(0), output->template mutable_data<T>(), &context_); if (!mean) { UnsortedSegmentSumKernel<SIndex, T> <<<CAFFE_GET_BLOCKS(data.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( data.numel(), slize_sz, segment_ids.template data<SIndex>(), data.template data<T>(), output->template mutable_data<T>(), nullptr); } else { // For mean, we need to compute scaling factors ReinitializeTensor( &scaling_factors_, {K + 1}, at::dtype<int>().device(CUDA)); math::Set<int, CUDAContext>( scaling_factors_.numel(), int(0), scaling_factors_.template mutable_data<int>(), &context_); UnsortedSegmentSumKernel<SIndex, T> <<<CAFFE_GET_BLOCKS(data.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( data.numel(), slize_sz, segment_ids.template data<SIndex>(), data.template data<T>(), output->template mutable_data<T>(), scaling_factors_.template mutable_data<int>()); // Divide by the scaling factors to get means SegmentScalingKernel<SIndex, T> <<<CAFFE_GET_BLOCKS(output->numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output->numel(), slize_sz, scaling_factors_.template data<int>(), output->template mutable_data<T>()); } return true; } private: Tensor buffer_tensor_; Tensor K_tensor_; Tensor scaling_factors_; // for mean }; template <typename SIndex> __global__ void segment_lengths_kernel(int N, const SIndex* X, SIndex* Y) { CUDA_1D_KERNEL_LOOP(i, N) { atomicAdd(&Y[X[i]], 1); } } template <typename T, typename SIndex, bool LOGEXP = false> __global__ void sorted_segment_mean_kernel( const SIndex K, const int N, const SIndex* S, const SIndex* I, const T* X, T* Y) { for (int sId = blockIdx.x; sId < K; sId += gridDim.x) { const int start_index = sId > 0 ? S[sId] * N : 0; const int y_start_index = sId * N; for (int i = threadIdx.x; i < N; i += blockDim.x) { T sum = 0.0; for (int j = 0; j < I[sId]; ++j) { const T x_i_j = X[start_index + j * N + i]; sum += LOGEXP ? exp(x_i_j) : x_i_j; } const T norm_sum = sum / I[sId]; Y[y_start_index + i] = LOGEXP ? 
log(norm_sum) : norm_sum; } } } template <typename T, typename SIndex, bool LOGEXP, class Context = CUDAContext> class SortedSegmentRangeMeanOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; SortedSegmentRangeMeanOp(const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~SortedSegmentRangeMeanOp() {} bool RunOnDevice() override { const auto& input = Input(0); const auto& indices = Input(1); int M = input.dim32(0); int N = input.size_from_dim(1); auto* output = Output(0); auto dims = input.sizes().vec(); SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), indices.template data<SIndex>() + indices.size() - 1, &K); context_.FinishDeviceComputation(); K += 1; dims[0] = K; if (segment_len_.size() != K) { segment_len_.Resize(K); segment_len_prefix_sum_.Resize(K); } output->Resize(dims); math::Set<SIndex, CUDAContext>( segment_len_.size(), 0, segment_len_.template mutable_data<SIndex>(), &context_); segment_lengths_kernel<<< CAFFE_GET_BLOCKS(indices.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( indices.size(), indices.template data<SIndex>(), segment_len_.template mutable_data<SIndex>()); size_t temp_storage_bytes = 0; cub::DeviceScan::ExclusiveSum( nullptr, temp_storage_bytes, segment_len_.template data<SIndex>(), segment_len_prefix_sum_.template mutable_data<SIndex>(), K, context_.cuda_stream()); auto buffer_size = (temp_storage_bytes + sizeof(T)) / sizeof(T); prefix_buffer_.Resize(buffer_size); void* dev_temp_storage = static_cast<void*>(prefix_buffer_.mutable_data<T>()); cub::DeviceScan::ExclusiveSum( dev_temp_storage, temp_storage_bytes, segment_len_.template data<SIndex>(), segment_len_prefix_sum_.template mutable_data<SIndex>(), K, context_.cuda_stream()); sorted_segment_mean_kernel<T, SIndex, LOGEXP> <<<std::min(K, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, N, segment_len_prefix_sum_.template data<SIndex>(), segment_len_.template data<SIndex>(), input.template data<T>(), output->template mutable_data<T>()); return true; } private: Tensor segment_len_{CUDA}; // for mean Tensor segment_len_prefix_sum_{CUDA}; Tensor prefix_buffer_{CUDA}; }; template <typename T, typename SIndex, bool LOGEXP = false> __global__ void sorted_segment_mean_gradient_kernel( const int M, const int N, const T* X, const T* Y, const T* dY, const SIndex* I, const SIndex* S, T* dX) { CUDA_1D_KERNEL_LOOP(i, M * N) { const int sId = I[i / N]; const int sSize = S[sId]; const int yId = N * sId + i % N; dX[i] = LOGEXP ? 
dY[yId] * exp(X[i] - Y[yId]) / sSize : dY[yId] / sSize; } } template <typename T, typename SIndex, bool LOGEXP, class Context = CUDAContext> class SortedSegmentRangeMeanGradientOp : public Operator<Context> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; SortedSegmentRangeMeanGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~SortedSegmentRangeMeanGradientOp() {} bool RunOnDevice() override { const auto& X = Input(0); const auto& Y = Input(1); const auto& dY = Input(2); const auto& I = Input(3); auto* dX = Output(0, X.sizes(), at::dtype<T>()); const int M = X.dim32(0); const int N = X.size_from_dim(1); SIndex K = 0; context_.CopyBytesToCPU( sizeof(SIndex), I.template data<SIndex>() + I.numel() - 1, &K); K += 1; if (segment_len_.numel() != K) { ReinitializeTensor(&segment_len_, {K}, at::dtype<SIndex>().device(CUDA)); } math::Set<SIndex, CUDAContext>( segment_len_.numel(), 0, segment_len_.template mutable_data<SIndex>(), &context_); segment_lengths_kernel<<< CAFFE_GET_BLOCKS(I.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( I.numel(), I.template data<SIndex>(), segment_len_.template mutable_data<SIndex>()); sorted_segment_mean_gradient_kernel<T, SIndex, LOGEXP> <<<CAFFE_GET_BLOCKS(dX->numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( M, N, X.template data<T>(), Y.template data<T>(), dY.template data<T>(), I.template data<SIndex>(), segment_len_.template data<SIndex>(), dX->template mutable_data<T>()); return true; } private: Tensor segment_len_; // for mean }; REGISTER_CUDA_OPERATOR_STR( "LengthsSum", CUDASparseLengthsSumOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsSum", CUDASparseLengthsSumOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "LengthsMean", CUDASparseLengthsMeanOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsMean", CUDASparseLengthsMeanOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "LengthsMax", CUDASparseLengthsMaxOp<float, CUDAContext, false>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsMax", CUDASparseLengthsMaxOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "SparseLengthsWeightedSum", CUDASparseLengthsWeightedSumOp<float, CUDAContext, true>); REGISTER_CUDA_OPERATOR_STR( "UnsortedSegmentSum", CUDAUnsortedSegmentSumOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "UnsortedSegmentMean", CUDAUnsortedSegmentSumOp<float, int, true>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeMean", SortedSegmentRangeMeanOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeLogMeanExp", SortedSegmentRangeMeanOp<float, int, true>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeMeanGradient", SortedSegmentRangeMeanGradientOp<float, int, false>); REGISTER_CUDA_OPERATOR_STR( "SortedSegmentRangeLogMeanExpGradient", SortedSegmentRangeMeanGradientOp<float, int, true>); template <typename T, class Context = CUDAContext> class CUDASparseLengthsSumGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsSumGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsSumGradientWithIndicesOp() {} bool RunOnDevice() override { auto& segmentGradsInput = Input(0); auto& lengthsInput = Input(1); auto& indicesInput = Input(2); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); 
CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); // calling cuda kernel with ExactBlock = true, Average = false length_sum_gradient_kernel<T, true, false> <<<len_length, block, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = false, Average = false length_sum_gradient_kernel<T, false, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsMeanGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsMeanGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsMeanGradientWithIndicesOp() {} bool RunOnDevice() override { auto& segmentGradsInput = Input(0); auto& lengthsInput = Input(1); auto& indicesInput = Input(2); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); // calling cuda kernel with ExactBlock = true, Average = true length_sum_gradient_kernel<T, true, true> <<<len_length, block, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } else { // calling cuda kernel with ExactBlock = false, Average = true 
length_sum_gradient_kernel<T, false, true> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsWeightedSumGradientWithIndicesOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsWeightedSumGradientWithIndicesOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsWeightedSumGradientWithIndicesOp() {} bool RunOnDevice() override { auto& weightsInput = Input(0); auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& indicesInput = Input(3); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); T* out_data = dataGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = segmentGradsInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post < maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); length_weighted_sum_gradient_kernel<T, true> <<<len_length, block, 0, context_.cuda_stream()>>>( in_data, in_weights, out_data, prefix_sum_length_data, N, post, len_length); } else { length_weighted_sum_gradient_kernel<T, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, in_weights, out_data, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, bool ExactBlock = false> __global__ void length_max_gradient_kernel( const T* __restrict__ grad_in, T* __restrict__ grad_out, const T* data_in, const T* data_out, const int* __restrict__ prefix_sum_length_data, int N, int post, int len_length) { // len_length blocks int group = blockIdx.x; int start = group == 0 ? 
0 : prefix_sum_length_data[group - 1]; int end = prefix_sum_length_data[group]; CUDA_KERNEL_ASSERT(start <= N); CUDA_KERNEL_ASSERT(end <= N); if (ExactBlock) { grad_out += threadIdx.x; grad_in += threadIdx.x; data_in += threadIdx.x; data_out += threadIdx.x; for (int line = start + threadIdx.y; line < end; line += blockDim.y) { if (data_in[line * post] == data_out[group * post]) { grad_out[line * post] = grad_in[group * post]; } else { grad_out[line * post] = 0; } } } else { for (int i = threadIdx.x; i < post; i += blockDim.x) { for (int line = start; line < end; ++line) { if (data_in[line * post + i] == data_out[group * post + i]) { grad_out[line * post + i] = grad_in[group * post + i]; } else { grad_out[line * post + i] = 0; } } } } } template <typename T, class Context = CUDAContext> class CUDALengthsMaxWithMainInputAndForwardOutputGradientOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDALengthsMaxWithMainInputAndForwardOutputGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDALengthsMaxWithMainInputAndForwardOutputGradientOp() {} bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, float>>::call(this, Input(3)); } template <typename IndexType> bool DoRunWithType() { auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& dataInput = Input(3); auto& dataOutput = Input(0); // based on CPU version CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); auto shape = dataInput.sizes().vec(); auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); const T* in_data = segmentGradsInput.template data<T>(); T* out_data = dataGradsOutput->template mutable_data<T>(); int N = dataInput.dim(0); int post = segmentGradsInput.size_from_dim(1); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } auto maxThreads = GetDeviceProperty(CaffeCudaGetDevice()).maxThreadsPerBlock; if (post <= maxThreads) { int multiple = std::min(maxThreads / post, SEGREDUCE_MINBLOCKS); dim3 block(post, multiple); length_max_gradient_kernel<T, true> <<<len_length, block, 0, context_.cuda_stream()>>>( in_data, out_data, dataInput.template data<T>(), dataOutput.template data<T>(), prefix_sum_length_data, N, post, len_length); } else { length_max_gradient_kernel<T, false> <<<len_length, maxThreads, 0, context_.cuda_stream()>>>( in_data, out_data, dataInput.template data<T>(), dataOutput.template data<T>(), prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; template <typename T, class Context = CUDAContext> class CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp : public Operator<CUDAContext> { public: USE_OPERATOR_CONTEXT_FUNCTIONS; CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp( const OperatorDef& operator_def, Workspace* ws) : Operator<CUDAContext>(operator_def, ws) {} ~CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp() {} 
bool RunOnDevice() override { return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(this, Input(4)); } template <typename IndexType> bool DoRunWithType() { auto& weightsInput = Input(0); auto& segmentGradsInput = Input(1); auto& lengthsInput = Input(2); auto& dataInput = Input(3); auto& indicesInput = Input(4); CAFFE_ENFORCE_EQ(1, lengthsInput.dim(), "LENGTHS must be a vector"); CAFFE_ENFORCE_EQ(1, weightsInput.dim(), "WEIGHTS must be a vector"); const int len_length = lengthsInput.dim(0); CAFFE_ENFORCE(segmentGradsInput.dim() > 0); CAFFE_ENFORCE(len_length == segmentGradsInput.dim(0)); auto shape = segmentGradsInput.sizes().vec(); int output_0dim = indicesInput.dim(0); shape[0] = output_0dim; auto* dataGradsOutput = Output(0, shape, at::dtype<T>()); auto* weightGradsOutput = Output(1, indicesInput.sizes(), at::dtype<T>()); T* out_data_grads = dataGradsOutput->template mutable_data<T>(); T* out_weight_grads = weightGradsOutput->template mutable_data<T>(); if (len_length <= 0) { // return early to avoid invalid empty kernel return true; } inclusive_scan_length_buffer_.ResizeLike(lengthsInput); inclusive_scan_wrapper( lengthsInput.template data<int>(), len_length, &inclusive_scan_buffer_, &inclusive_scan_length_buffer_, &context_); // compute output size using length auto* prefix_sum_length_data = inclusive_scan_length_buffer_.template data<int>(); const T* in_data = dataInput.template data<T>(); const T* in_grads = segmentGradsInput.template data<T>(); const T* in_weights = weightsInput.template data<T>(); const IndexType* indices = indicesInput.template data<IndexType>(); int N = output_0dim; int post = segmentGradsInput.size_from_dim(1); if (post > 128) { length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 512> <<<len_length, 512, 0, context_.cuda_stream()>>>( in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else if (post > 64) { length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 128> <<<len_length, 128, 0, context_.cuda_stream()>>>( in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else if (post > 32) { length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 64> <<<len_length, 64, 0, context_.cuda_stream()>>>( in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } else { length_weighted_sum_with_main_input_gradient_kernel<T, IndexType, 32> <<<len_length, 32, 0, context_.cuda_stream()>>>( in_grads, in_weights, in_data, indices, out_data_grads, out_weight_grads, prefix_sum_length_data, N, post, len_length); } return true; } private: // menber field to manage memory Tensor inclusive_scan_buffer_{CUDA}; Tensor inclusive_scan_length_buffer_{CUDA}; }; // Needed because name is auto-generated in segment_reduction_op.cc:224 REGISTER_CUDA_OPERATOR_STR( "LengthsMaxWithMainInputAndForwardOutputGradient", CUDALengthsMaxWithMainInputAndForwardOutputGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientWeightedSumGradient, CUDASparseLengthsWeightedSumGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientWeightedSumWithMainInputGradient, CUDASparseLengthsIndicesInGradientWeightedSumWithMainInputGradientOp< float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientSumGradient, CUDASparseLengthsSumGradientWithIndicesOp<float, CUDAContext>); 
REGISTER_CUDA_OPERATOR( LengthsIndicesInGradientSumGradient, CUDASparseLengthsSumGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SparseLengthsIndicesInGradientMeanGradient, CUDASparseLengthsMeanGradientWithIndicesOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( LengthsIndicesInGradientMeanGradient, CUDASparseLengthsMeanGradientWithIndicesOp<float, CUDAContext>); } // namespace caffe2 // Macro doesn't like comma using LengthsSumCUDAOp = caffe2::CUDASparseLengthsSumOp<float, caffe2::CUDAContext, false>; using LengthsMeanCUDAOp = caffe2::CUDASparseLengthsMeanOp<float, caffe2::CUDAContext, false>; using LengthsMaxCUDAOp = caffe2::CUDASparseLengthsMaxOp<float, caffe2::CUDAContext, false>; C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsSum, LengthsSumCUDAOp); C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsMean, LengthsMeanCUDAOp); C10_EXPORT_CAFFE2_OP_TO_C10_CUDA(LengthsMax, LengthsMaxCUDAOp); #undef SEGREDUCE_MINBLOCKS
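// ---------------------------------------------------------------------------
// Minimal standalone sketch of the two-call cub::DeviceScan::InclusiveSum
// pattern that inclusive_scan_wrapper above relies on: the first call receives
// a null temp-storage pointer and only reports temp_storage_bytes, the second
// call performs the scan. The resulting inclusive prefix sums over the LENGTHS
// input are what the reduction kernels read: segment i spans
// [prefix[i-1], prefix[i]), with start 0 for i == 0. The lengths values, launch
// configuration, and missing error checks below are illustrative assumptions,
// not taken from the operators in this file.
// ---------------------------------------------------------------------------
#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int len_length = 4;
  const int h_lengths[len_length] = {2, 0, 3, 1};  // example per-segment lengths

  int *d_lengths = nullptr, *d_prefix = nullptr;
  cudaMalloc((void**)&d_lengths, len_length * sizeof(int));
  cudaMalloc((void**)&d_prefix, len_length * sizeof(int));
  cudaMemcpy(d_lengths, h_lengths, len_length * sizeof(int), cudaMemcpyHostToDevice);

  // Call 1: null temp storage -> CUB only computes the required scratch size.
  size_t temp_bytes = 0;
  cub::DeviceScan::InclusiveSum(nullptr, temp_bytes, d_lengths, d_prefix, len_length);

  // Call 2: same arguments plus the allocated scratch buffer -> runs the scan.
  void* d_temp = nullptr;
  cudaMalloc(&d_temp, temp_bytes);
  cub::DeviceScan::InclusiveSum(d_temp, temp_bytes, d_lengths, d_prefix, len_length);

  int h_prefix[len_length];
  cudaMemcpy(h_prefix, d_prefix, len_length * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < len_length; ++i)
    printf("segment %d: [%d, %d)\n", i, i == 0 ? 0 : h_prefix[i - 1], h_prefix[i]);
  // prints segments [0,2) [2,2) [2,5) [5,6)

  cudaFree(d_temp);
  cudaFree(d_prefix);
  cudaFree(d_lengths);
  return 0;
}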
058824b765edd5c6a254184327802729bccca433.hip
// !!! This is a file automatically generated by hipify!!! /* Neural Net. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <chrono> #include <errno.h> #include "util.h" // includes, kernels #include "neuralnet_kernel.cu" #include "neuralnet_gold.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" //void computeGold(); float* readInputs(char * filename, int rows, int columns); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { TIME_IT("Sequential Code",1 , computeGold();) unsigned int size = NUM_ELEMENTS*(FEATURES+1);//some random number for now. Will change it to be the size of the dataset float* input_h = readInputs(argv[1], NUM_ELEMENTS, FEATURES+1); float* onehotR_h = readInputs(argv[2], NUM_ELEMENTS, NO_OF_CLASSES); float* train_labels_h = readInputs(argv[3], NUM_ELEMENTS, 1); float* w_h = (float*)malloc(NUM_ELEMENTS*(FEATURES+1)*sizeof(float)); float* v_h = (float*)malloc((HIDDEN_UNITS+1)*(NO_OF_CLASSES)*sizeof(float)); float* input_d; float* onehotR_d; float* train_labels_d; float* w_d; float* v_d; //Initializing the seed srand(-123); float a = -0.01; float b = 0.01; float *y_d; allocateDeviceArray(&y_d, NUM_ELEMENTS*NO_OF_CLASSES); hipMemset(y_d, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float)); //read one hot label r and original label r into host variables and then transfer into device array allocateDeviceArray(&input_d, size); allocateDeviceArray(&onehotR_d, NUM_ELEMENTS*NO_OF_CLASSES); allocateDeviceArray(&train_labels_d, NUM_ELEMENTS); allocateDeviceArray(&w_d, NUM_ELEMENTS*(FEATURES+1)); allocateDeviceArray(&v_d, (HIDDEN_UNITS+1)*NO_OF_CLASSES); for(int x=0; x<HIDDEN_UNITS; x++) { for(int y=0; y<FEATURES+1; y++) { w_h[x*HIDDEN_UNITS + y] = ((float)rand()/RAND_MAX) * (b - a) + a; } } for(int x=0; x<NO_OF_CLASSES; x++) { for(int y=0; y<HIDDEN_UNITS+1; y++) { v_h[x*NO_OF_CLASSES + y] = ((float)rand()/RAND_MAX) * (b - a) + a; } } copyDataHostToDevice(input_d, input_h , size); copyDataHostToDevice(onehotR_d, onehotR_h , NUM_ELEMENTS*NO_OF_CLASSES); copyDataHostToDevice(train_labels_d, train_labels_h , NUM_ELEMENTS); copyDataHostToDevice(w_d, w_h , NUM_ELEMENTS*(FEATURES+1)); copyDataHostToDevice(v_d, v_h , (HIDDEN_UNITS+1)*NO_OF_CLASSES); /* This is the call you will use to time your parallel implementation */ printf("parallel code\n"); TIME_IT("getAccuracy", 1, getAccuracy(input_d, onehotR_d, train_labels_d, w_d, v_d, y_d);) float* y_h= (float*)malloc(NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float)); copyDataDeviceToHost(y_h, y_d, NUM_ELEMENTS*NO_OF_CLASSES); float accuracy = calculateErrorRate(train_labels_h, y_h); printf("ACCURACY: %f\n",accuracy); //free memory freeMemory(input_d); freeMemory(onehotR_d); freeMemory(train_labels_d); freeMemory(w_d); freeMemory(v_d); return 0; } float* readInputs(char * filename, int rows, int columns) { char buff[100000]; FILE *fp; char *record,*line; int i=0,j=0; float* data = (float*)malloc(rows * columns * sizeof(float *)); //Opening train_data csv file fp = fopen(filename, "r"); if(!fp) { printf("\n file opening failed "); printf("Error: %s\n", strerror(errno)); return (float*)(-1) ; } //iterating over each line of train_data.csv while((line = fgets(buff,sizeof(buff),fp)) != NULL) { //Splitting line into pixel values based on comma record = 
strtok(line, ",");
        while(record != NULL) {
            //store the parsed value and advance to the next comma-separated field
            data[i * columns + j] = atof(record);
            ++j;
            record = strtok(NULL, ",");
        }
        ++i;
        j = 0;
    }
    fclose(fp);
    // record and line are both NULL here, so these frees are no-ops
    free(record);
    free(line);
    return data;
}
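// ---------------------------------------------------------------------------
// The helpers used above (allocateDeviceArray, copyDataHostToDevice,
// copyDataDeviceToHost, freeMemory) come from util.h / neuralnet_kernel.cu and
// are not shown in this dump. The definitions below are a hypothetical
// reconstruction inferred from the call sites (float element type, element
// counts rather than byte counts); the HIP_CHECK macro is an added assumption,
// not part of the original project.
// ---------------------------------------------------------------------------
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(expr)                                                     \
  do {                                                                      \
    hipError_t err_ = (expr);                                               \
    if (err_ != hipSuccess) {                                               \
      fprintf(stderr, "HIP error %s at %s:%d\n",                            \
              hipGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

// Allocate `count` floats on the device.
void allocateDeviceArray(float** devPtr, size_t count) {
  HIP_CHECK(hipMalloc((void**)devPtr, count * sizeof(float)));
}

// Blocking host-to-device copy of `count` floats.
void copyDataHostToDevice(float* dev, const float* host, size_t count) {
  HIP_CHECK(hipMemcpy(dev, host, count * sizeof(float), hipMemcpyHostToDevice));
}

// Blocking device-to-host copy of `count` floats.
void copyDataDeviceToHost(float* host, const float* dev, size_t count) {
  HIP_CHECK(hipMemcpy(host, dev, count * sizeof(float), hipMemcpyDeviceToHost));
}

// Release a device allocation made by allocateDeviceArray.
void freeMemory(float* dev) {
  HIP_CHECK(hipFree(dev));
}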
058824b765edd5c6a254184327802729bccca433.cu
/* Neural Net. * Host code. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <chrono> #include <errno.h> #include "util.h" // includes, kernels #include "neuralnet_kernel.cu" #include "neuralnet_gold.h" //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" //void computeGold(); float* readInputs(char * filename, int rows, int columns); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { TIME_IT("Sequential Code",1 , computeGold();) unsigned int size = NUM_ELEMENTS*(FEATURES+1);//some random number for now. Will change it to be the size of the dataset float* input_h = readInputs(argv[1], NUM_ELEMENTS, FEATURES+1); float* onehotR_h = readInputs(argv[2], NUM_ELEMENTS, NO_OF_CLASSES); float* train_labels_h = readInputs(argv[3], NUM_ELEMENTS, 1); float* w_h = (float*)malloc(NUM_ELEMENTS*(FEATURES+1)*sizeof(float)); float* v_h = (float*)malloc((HIDDEN_UNITS+1)*(NO_OF_CLASSES)*sizeof(float)); float* input_d; float* onehotR_d; float* train_labels_d; float* w_d; float* v_d; //Initializing the seed srand(-123); float a = -0.01; float b = 0.01; float *y_d; allocateDeviceArray(&y_d, NUM_ELEMENTS*NO_OF_CLASSES); cudaMemset(y_d, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float)); //read one hot label r and original label r into host variables and then transfer into device array allocateDeviceArray(&input_d, size); allocateDeviceArray(&onehotR_d, NUM_ELEMENTS*NO_OF_CLASSES); allocateDeviceArray(&train_labels_d, NUM_ELEMENTS); allocateDeviceArray(&w_d, NUM_ELEMENTS*(FEATURES+1)); allocateDeviceArray(&v_d, (HIDDEN_UNITS+1)*NO_OF_CLASSES); for(int x=0; x<HIDDEN_UNITS; x++) { for(int y=0; y<FEATURES+1; y++) { w_h[x*HIDDEN_UNITS + y] = ((float)rand()/RAND_MAX) * (b - a) + a; } } for(int x=0; x<NO_OF_CLASSES; x++) { for(int y=0; y<HIDDEN_UNITS+1; y++) { v_h[x*NO_OF_CLASSES + y] = ((float)rand()/RAND_MAX) * (b - a) + a; } } copyDataHostToDevice(input_d, input_h , size); copyDataHostToDevice(onehotR_d, onehotR_h , NUM_ELEMENTS*NO_OF_CLASSES); copyDataHostToDevice(train_labels_d, train_labels_h , NUM_ELEMENTS); copyDataHostToDevice(w_d, w_h , NUM_ELEMENTS*(FEATURES+1)); copyDataHostToDevice(v_d, v_h , (HIDDEN_UNITS+1)*NO_OF_CLASSES); /* This is the call you will use to time your parallel implementation */ printf("parallel code\n"); TIME_IT("getAccuracy", 1, getAccuracy(input_d, onehotR_d, train_labels_d, w_d, v_d, y_d);) float* y_h= (float*)malloc(NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float)); copyDataDeviceToHost(y_h, y_d, NUM_ELEMENTS*NO_OF_CLASSES); float accuracy = calculateErrorRate(train_labels_h, y_h); printf("ACCURACY: %f\n",accuracy); //free memory freeMemory(input_d); freeMemory(onehotR_d); freeMemory(train_labels_d); freeMemory(w_d); freeMemory(v_d); return 0; } float* readInputs(char * filename, int rows, int columns) { char buff[100000]; FILE *fp; char *record,*line; int i=0,j=0; float* data = (float*)malloc(rows * columns * sizeof(float *)); //Opening train_data csv file fp = fopen(filename, "r"); if(!fp) { printf("\n file opening failed "); printf("Error: %s\n", strerror(errno)); return (float*)(-1) ; } //iterating over each line of train_data.csv while((line = fgets(buff,sizeof(buff),fp)) != NULL) { //Splitting line into pixel values based on comma record = strtok(line,","); while(record != NULL) { data[i * columns + j] = 
atof(record);
            ++j;
            record = strtok(NULL, ",");
        }
        ++i;
        j = 0;
    }
    fclose(fp);
    // record and line are both NULL here, so these frees are no-ops
    free(record);
    free(line);
    return data;
}
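// ---------------------------------------------------------------------------
// As far as this dump shows, the .hip and .cu files in this pair differ only in
// the "automatically generated by hipify" banner prepended to the .hip version
// and in the single runtime call below; every other line is identical.
//
//   CUDA original (.cu):
//     cudaMemset(y_d, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
//   HIP translation (.hip):
//     hipMemset(y_d, 0, NUM_ELEMENTS*NO_OF_CLASSES*sizeof(float));
// ---------------------------------------------------------------------------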
dbe8d89abaaf6dcf802a90899fdd565f4fc16946.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Unsqueeze.h" #include "Unsqueeze.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_ROCM /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlat(void * s, int blockSize, int totalSize, void * t, int n) { /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= blockSize) return; T value = ((T*)s)[i]; T * tData = (T*)t; __syncthreads(); for (int k = i; k < totalSize; k += blockSize) tData[k] = value; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlatBigram(void * s, int blockSize, int totalSize, void * t, int n) { /* index of data items */ int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2; if (i >= blockSize) return; T value = ((T*)s)[i]; T value2 = ((T*)s)[i + 1]; T * tData = (T*)t; __syncthreads(); for (int k = i; k < totalSize; k += blockSize){ tData[k] = value; tData[k + 1] = value2; } } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlat2D(void * s, int blockSize, int totalSize, void * t, int n) { __shared__ T data[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int offsets[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index of data items */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockSize || j >= n) return; if(threadIdx.y == 0) data[threadIdx.x] = ((T*)s)[i]; if(threadIdx.x == 0) offsets[threadIdx.y] = blockSize * j; __syncthreads(); ((T*)t)[offsets[threadIdx.y] + i] = data[threadIdx.x]; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> blockNum - number of the blocks >> totalSize - total size of the blocks 
(i.e., blockSize * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueeze(void * s, int blockSize, int blockNum, int totalSize, void * t, int n) { /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* block index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockSize || j >= blockNum) return; MTYPE offset = blockSize * j; T value = ((T*)s)[offset + i]; T * tData = (T*)t + offset * n; __syncthreads(); for (int k = i; k < totalSize; k += blockSize) tData[k] = value; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) This is special case where we actually copy a v-dimentional column vector by n times to form a v * n matrix >> s - pointer to the source data array >> rowNum - number of rows (i.e., dimension size of s) >> colNum - number of columns (i.e., number of copies) >> t - pointer to the target data array */ template<class T> __global__ void KernelUnsqueezeByCol(void * s, int rowNum, int colNum, void * t) { __shared__ T values[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T * ts[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* column index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* row index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= colNum || j >= rowNum) return; if(threadIdx.x == 0){ values[threadIdx.y] = ((T*)s)[j]; ts[threadIdx.y] = (T*)t + colNum * j; } __syncthreads(); ts[threadIdx.y][i] = values[threadIdx.y]; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) This is special case where we actually copy a v-dimentional column vector by n times to form a v * n matrix And a row is very big so that it occupies the cuda threads in a block >> s - pointer to the source data array >> rowNum - number of rows (i.e., dimension size of s) >> colNum - number of columns (i.e., number of copies) >> t - pointer to the target data array */ template<class T> __global__ void KernelUnsqueezeByColBigRow(void * s, int rowNum, int colNum, void * t) { __shared__ T value; __shared__ T * tData; /* column index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* row index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= colNum || j >= rowNum) return; if (threadIdx.x == 0) { value = ((T*)s)[j]; tData = (T*)t + colNum * j; } __syncthreads(); tData[i] = value; } /* insert a dimension by copying the blocks for x times (where x is the size of the inerted dimension) >> a - input tensor >> b - output tensor >> dim - where to insert the dimension >> dSize - size of the newly-inserted dimension */ void _CudaUnsqueeze(const XTensor * a, XTensor * b, int dim, int dSize) { int blockSize = 1; int blockNumA = 1; int blockNumB = 1; for (int i = dim; i < a->order; i++) blockSize *= a->dimSize[i]; blockNumA = a->unitNum / blockSize; blockNumB = b->unitNum / blockSize; CheckNTErrors((blockNumA * dSize == blockNumB), "Unmatched tensors!");; int cudaGrids[3]; int cudaBlocks[3]; int devIDBackup = 0; ProtectCudaDev(a->devID, devIDBackup); if (dim == b->order - 1) { GDevs.GetCudaThread2D(a->devID, dSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { if (cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, 
blockNumA, dSize, b->data); } else if (a->dataType == X_INT && b->dataType == X_INT) { if (cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { if (cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); } else { ShowNTErrors("TODO!"); } } else if(blockNumA > 1){ GDevs.GetCudaThread2D(a->devID, blockSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueeze<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueeze<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueeze<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1 && blockSize < MAX_CUDA_THREAD_NUM_PER_BLOCK){ GDevs.GetCudaThread2D(a->devID, blockSize, dSize, MAX_CUDA_THREAD_NUM_PER_BLOCK/4, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlat2D<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueezeFlat2D<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlat2D<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1 && blockSize % 2 == 0){ GDevs.GetCudaThread(a->devID, blockSize/2, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlatBigram<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueezeFlatBigram<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlatBigram<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1){ GDevs.GetCudaThread(a->devID, blockSize, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlat<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * 
dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueezeFlat<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlat<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else{ ShowNTErrors("Something is wrong!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_ROCM } // namespace nts(NiuTrans.Tensor)
dbe8d89abaaf6dcf802a90899fdd565f4fc16946.cu
/* NiuTrans.Tensor - an open-source tensor library * Copyright (C) 2017, Natural Language Processing Lab, Northeastern University. * All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * $Created by: XIAO Tong (email: [email protected]) 2018-04-24 */ #include "../../XDevice.h" #include "../../XTensor.h" #include "Unsqueeze.h" #include "Unsqueeze.cuh" namespace nts { // namespace nts(NiuTrans.Tensor) #ifdef USE_CUDA /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlat(void * s, int blockSize, int totalSize, void * t, int n) { /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i >= blockSize) return; T value = ((T*)s)[i]; T * tData = (T*)t; __syncthreads(); for (int k = i; k < totalSize; k += blockSize) tData[k] = value; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlatBigram(void * s, int blockSize, int totalSize, void * t, int n) { /* index of data items */ int i = (blockDim.x * blockIdx.x + threadIdx.x) * 2; if (i >= blockSize) return; T value = ((T*)s)[i]; T value2 = ((T*)s)[i + 1]; T * tData = (T*)t; __syncthreads(); for (int k = i; k < totalSize; k += blockSize){ tData[k] = value; tData[k + 1] = value2; } } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> totalSize - total size of the blocks (i.e., blockSIze * n) >> t - pointer to the target data array >> n - number of blocks to copy data */ template<class T> __global__ void KernelUnsqueezeFlat2D(void * s, int blockSize, int totalSize, void * t, int n) { __shared__ T data[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ int offsets[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* index of data items */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockSize || j >= n) return; if(threadIdx.y == 0) data[threadIdx.x] = ((T*)s)[i]; if(threadIdx.x == 0) offsets[threadIdx.y] = blockSize * j; __syncthreads(); ((T*)t)[offsets[threadIdx.y] + i] = data[threadIdx.x]; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) >> s - pointer to the source data array >> blockSize - size of a block >> blockNum - number of the blocks >> totalSize - total size of the blocks (i.e., blockSize * n) >> t - pointer to the target data array >> n - number of blocks to 
copy data */ template<class T> __global__ void KernelUnsqueeze(void * s, int blockSize, int blockNum, int totalSize, void * t, int n) { /* index of data items */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* block index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= blockSize || j >= blockNum) return; MTYPE offset = blockSize * j; T value = ((T*)s)[offset + i]; T * tData = (T*)t + offset * n; __syncthreads(); for (int k = i; k < totalSize; k += blockSize) tData[k] = value; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) This is special case where we actually copy a v-dimentional column vector by n times to form a v * n matrix >> s - pointer to the source data array >> rowNum - number of rows (i.e., dimension size of s) >> colNum - number of columns (i.e., number of copies) >> t - pointer to the target data array */ template<class T> __global__ void KernelUnsqueezeByCol(void * s, int rowNum, int colNum, void * t) { __shared__ T values[MAX_CUDA_THREAD_NUM_PER_BLOCK]; __shared__ T * ts[MAX_CUDA_THREAD_NUM_PER_BLOCK]; /* column index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* row index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= colNum || j >= rowNum) return; if(threadIdx.x == 0){ values[threadIdx.y] = ((T*)s)[j]; ts[threadIdx.y] = (T*)t + colNum * j; } __syncthreads(); ts[threadIdx.y][i] = values[threadIdx.y]; } /* insert a dimension by copying the blocks for n times (where n is the size of the inerted dimension) This is special case where we actually copy a v-dimentional column vector by n times to form a v * n matrix And a row is very big so that it occupies the cuda threads in a block >> s - pointer to the source data array >> rowNum - number of rows (i.e., dimension size of s) >> colNum - number of columns (i.e., number of copies) >> t - pointer to the target data array */ template<class T> __global__ void KernelUnsqueezeByColBigRow(void * s, int rowNum, int colNum, void * t) { __shared__ T value; __shared__ T * tData; /* column index */ int i = blockDim.x * blockIdx.x + threadIdx.x; /* row index */ int j = blockDim.y * blockIdx.y + threadIdx.y; if (i >= colNum || j >= rowNum) return; if (threadIdx.x == 0) { value = ((T*)s)[j]; tData = (T*)t + colNum * j; } __syncthreads(); tData[i] = value; } /* insert a dimension by copying the blocks for x times (where x is the size of the inerted dimension) >> a - input tensor >> b - output tensor >> dim - where to insert the dimension >> dSize - size of the newly-inserted dimension */ void _CudaUnsqueeze(const XTensor * a, XTensor * b, int dim, int dSize) { int blockSize = 1; int blockNumA = 1; int blockNumB = 1; for (int i = dim; i < a->order; i++) blockSize *= a->dimSize[i]; blockNumA = a->unitNum / blockSize; blockNumB = b->unitNum / blockSize; CheckNTErrors((blockNumA * dSize == blockNumB), "Unmatched tensors!");; int cudaGrids[3]; int cudaBlocks[3]; int devIDBackup = 0; ProtectCudaDev(a->devID, devIDBackup); if (dim == b->order - 1) { GDevs.GetCudaThread2D(a->devID, dSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { if (cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); } else if (a->dataType == X_INT && b->dataType == X_INT) { if 
(cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { if (cudaBlocks[1] == 1) KernelUnsqueezeByColBigRow<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); else KernelUnsqueezeByCol<__half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockNumA, dSize, b->data); } else { ShowNTErrors("TODO!"); } } else if(blockNumA > 1){ GDevs.GetCudaThread2D(a->devID, blockSize, blockNumA, MAX_INT, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueeze<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueeze<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueeze<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockNumA, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1 && blockSize < MAX_CUDA_THREAD_NUM_PER_BLOCK){ GDevs.GetCudaThread2D(a->devID, blockSize, dSize, MAX_CUDA_THREAD_NUM_PER_BLOCK/4, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlat2D<float> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueezeFlat2D<int> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlat2D<half> << <dim3(cudaGrids[0], cudaGrids[1]), dim3(cudaBlocks[0], cudaBlocks[1]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1 && blockSize % 2 == 0){ GDevs.GetCudaThread(a->devID, blockSize/2, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlatBigram<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { KernelUnsqueezeFlatBigram<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlatBigram<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else if(blockNumA == 1){ GDevs.GetCudaThread(a->devID, blockSize, cudaGrids, cudaBlocks); if (a->dataType == X_FLOAT && b->dataType == X_FLOAT) { KernelUnsqueezeFlat<float> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_INT && b->dataType == X_INT) { 
KernelUnsqueezeFlat<int> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else if (a->dataType == X_FLOAT16 && b->dataType == X_FLOAT16) { KernelUnsqueezeFlat<half> << <dim3(cudaGrids[0]), dim3(cudaBlocks[0]) >> > (a->data, blockSize, blockSize * dSize, b->data, dSize); } else { ShowNTErrors("TODO!"); } } else{ ShowNTErrors("Something is wrong!"); } BacktoCudaDev(a->devID, devIDBackup); } #endif // USE_CUDA } // namespace nts(NiuTrans.Tensor)
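Editorial aside, not part of the paired files above: the core of the Unsqueeze operation is simply copying a blockSize-long vector n times into the target buffer, as KernelUnsqueezeFlat does. The following is a minimal, self-contained sketch of that pattern with made-up sizes, independent of the NiuTrans tensor types.
#include <cstdio>
#include <cuda_runtime.h>

// Each thread reads one element of the source vector and writes it into
// every one of the n copies, striding by blockSize (same loop shape as above).
__global__ void unsqueezeFlatDemo(const float * s, int blockSize, int totalSize, float * t)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= blockSize)
        return;
    float value = s[i];
    for (int k = i; k < totalSize; k += blockSize)
        t[k] = value;
}

int main()
{
    const int blockSize = 8, n = 3, totalSize = blockSize * n;
    float hs[blockSize], ht[totalSize];
    for (int i = 0; i < blockSize; i++) hs[i] = (float)i;

    float *ds, *dt;
    cudaMalloc((void**)&ds, blockSize * sizeof(float));
    cudaMalloc((void**)&dt, totalSize * sizeof(float));
    cudaMemcpy(ds, hs, blockSize * sizeof(float), cudaMemcpyHostToDevice);

    unsqueezeFlatDemo<<<1, 32>>>(ds, blockSize, totalSize, dt);
    cudaMemcpy(ht, dt, totalSize * sizeof(float), cudaMemcpyDeviceToHost);

    for (int k = 0; k < totalSize; k++) printf("%g ", ht[k]);   // the 8-element vector printed 3 times
    printf("\n");
    cudaFree(ds); cudaFree(dt);
    return 0;
}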
cdd6ff44b00d210ebeb46286237cc1c24e13c6fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/accuracy_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AccuracyForwardGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; int num_better_predictions = -1; // true_class also counts as "better" if (has_ignore_label_ && label_value == ignore_label_) { acc[index] = 0; counts[index] = 0; } else { for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[index] = (num_better_predictions < top_k); counts[index] = 1; } } } template <typename Dtype> __global__ void AccuracyForwardWithPerClassGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, Dtype* counts, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { // nothing to be done. } else { int num_better_predictions = -1; // true_class also counts as "better" for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[label_value*nthreads + index] += (num_better_predictions < top_k); counts[label_value*nthreads + index] = 1; } } } template <typename Dtype> void AccuracyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_label = bottom[1]->gpu_data(); const int dim = bottom[0]->count() / outer_num_; const int num_labels = bottom[0]->shape(label_axis_); const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, // we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* acc_data = bottom[0]->mutable_gpu_diff(); if (top.size() == 1) { // simple case - report only global accuracy. // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AccuracyForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_, counts); Dtype acc; caffe_gpu_asum(nthreads, acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nthreads, counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } } else { // need to report per-class accuracy as well // allocate space for more detailed "counts" nums_buffer_.ReshapeLike(*bottom[0]); Dtype* counts = nums_buffer_.mutable_gpu_data(); caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AccuracyForwardWithPerClassGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bottom_data, bottom_label, acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_); // get the overall accuracy Dtype acc; caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } // get per-class accuracy Dtype* per_class_acc = top[1]->mutable_cpu_data(); for (int l = 0; l < num_labels; l++) { caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); if (valid_count > 0) { per_class_acc[l] /= valid_count; } else { per_class_acc[l] = 0; } } } } template <typename Dtype> void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { NOT_IMPLEMENTED; } } INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); } // namespace caffe
cdd6ff44b00d210ebeb46286237cc1c24e13c6fd.cu
#include <vector> #include "caffe/layers/accuracy_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void AccuracyForwardGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; int num_better_predictions = -1; // true_class also counts as "better" if (has_ignore_label_ && label_value == ignore_label_) { acc[index] = 0; counts[index] = 0; } else { for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[index] = (num_better_predictions < top_k); counts[index] = 1; } } } template <typename Dtype> __global__ void AccuracyForwardWithPerClassGPU(const int nthreads, const Dtype* bottom_data, const Dtype* label, Dtype* acc, Dtype* counts, const int num, const int dim, const int spatial_dim, const int num_labels, const int top_k, const bool has_ignore_label_, const int ignore_label_) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); const Dtype prob_of_true_class = bottom_data[n * dim + label_value * spatial_dim + s]; if (has_ignore_label_ && label_value == ignore_label_) { // nothing to be done. } else { int num_better_predictions = -1; // true_class also counts as "better" for (int k = 0; k < num_labels & num_better_predictions < top_k; k++) { num_better_predictions += (bottom_data[n * dim + k * spatial_dim + s] >= prob_of_true_class); } acc[label_value*nthreads + index] += (num_better_predictions < top_k); counts[label_value*nthreads + index] = 1; } } } template <typename Dtype> void AccuracyLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* bottom_label = bottom[1]->gpu_data(); const int dim = bottom[0]->count() / outer_num_; const int num_labels = bottom[0]->shape(label_axis_); const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything, // we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* acc_data = bottom[0]->mutable_gpu_diff(); if (top.size() == 1) { // simple case - report only global accuracy. // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. 
Dtype* counts = bottom[1]->mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) AccuracyForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, bottom_label, acc_data, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_, counts); Dtype acc; caffe_gpu_asum(nthreads, acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nthreads, counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } } else { // need to report per-class accuracy as well // allocate space for more detailed "counts" nums_buffer_.ReshapeLike(*bottom[0]); Dtype* counts = nums_buffer_.mutable_gpu_data(); caffe_gpu_set(bottom[0]->count(), Dtype(0), acc_data); caffe_gpu_set(nums_buffer_.count(), Dtype(0), counts); // NOLINT_NEXT_LINE(whitespace/operators) AccuracyForwardWithPerClassGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, bottom_data, bottom_label, acc_data, counts, outer_num_, dim, inner_num_, num_labels, top_k_, has_ignore_label_, ignore_label_); // get the overall accuracy Dtype acc; caffe_gpu_asum(bottom[0]->count(), acc_data, &acc); Dtype valid_count; caffe_gpu_asum(nums_buffer_.count(), counts, &valid_count); if (valid_count > 0) { top[0]->mutable_cpu_data()[0] = acc / valid_count; } else { top[0]->mutable_cpu_data()[0] = 0; } // get per-class accuracy Dtype* per_class_acc = top[1]->mutable_cpu_data(); for (int l = 0; l < num_labels; l++) { caffe_gpu_asum(nthreads, acc_data + l*nthreads, per_class_acc+l); caffe_gpu_asum(nthreads, counts + l*nthreads, &valid_count); if (valid_count > 0) { per_class_acc[l] /= valid_count; } else { per_class_acc[l] = 0; } } } } template <typename Dtype> void AccuracyLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { NOT_IMPLEMENTED; } } INSTANTIATE_LAYER_GPU_FUNCS(AccuracyLayer); } // namespace caffe
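Editorial aside, not part of the paired files above: the top-k test in AccuracyForwardGPU starts its counter at -1 so the true class's own score (which always compares >= to itself) is not counted as a "better" prediction, then counts scores that are at least as large as the true class's score. A host-side sketch of that logic, with a tiny made-up score vector:
#include <cstdio>

// Returns true if the true label is among the top-k scoring classes.
bool topKCorrect(const float * scores, int numLabels, int trueLabel, int topK)
{
    const float probTrue = scores[trueLabel];
    int numBetter = -1;                       // the true class matches itself once
    for (int k = 0; k < numLabels && numBetter < topK; k++)
        numBetter += (scores[k] >= probTrue) ? 1 : 0;
    return numBetter < topK;                  // correct if fewer than topK scores beat it
}

int main()
{
    const float scores[4] = {0.1f, 0.5f, 0.3f, 0.1f};
    printf("%d\n", topKCorrect(scores, 4, 2, 1));   // 0: class 1 scores higher than class 2
    printf("%d\n", topKCorrect(scores, 4, 2, 2));   // 1: class 2 is within the top 2
    return 0;
}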
0985e0beffc78838b997ffb84c1e80a059c6cd83.hip
// !!! This is a file automatically generated by hipify!!! #define GLM_FORCE_CUDA #include <stdio.h> #include <hip/hip_runtime.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { hipError_t err = hipGetLastError(); if (hipSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 /*! Mass of one "planet." */ #define planetMass 3e8f /*! Mass of the "star" at the center. */ #define starMass 5e10f /*! Size of the starting area in simulation space. */ const float scene_scale = 1e2; /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); glm::vec3 *dev_pos; glm::vec3 *dev_vel; glm::vec3 *dev_acc; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * Function for generating a random vec3. */ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * CUDA kernel for generating planets with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = 0.1 * scale * sqrt(rand.x * rand.x + rand.y * rand.y) * rand.z; } } /** * CUDA kernel for generating velocities in a vortex around the origin. * This is just to make for an interesting-looking scene. 
*/ __global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); float r = glm::length(R) + EPSILON; float s = sqrt(G * starMass / r); glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1))); arr[index].x = s * D.x; arr[index].y = s * D.y; arr[index].z = s * D.z; } } /** * Initialize memory, update some globals */ void Nbody::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_pos failed!"); hipMalloc((void**)&dev_vel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_vel failed!"); hipMalloc((void**)&dev_acc, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("hipMalloc dev_acc failed!"); hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); hipLaunchKernelGGL(( kernGenerateCircularVelArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!"); hipDeviceSynchronize(); } /****************** * copyPlanetsToVBO * ******************/ /** * Copy the planet positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1; } } /** * Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel. */ void Nbody::copyPlanetsToVBO(float *vbodptr) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize))); hipLaunchKernelGGL(( kernCopyPlanetsToVBO), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dev_pos, vbodptr, scene_scale); checkCUDAErrorWithLine("copyPlanetsToVBO failed!"); hipDeviceSynchronize(); } /****************** * stepSimulation * ******************/ /** * Compute the acceleration on a body at `my_pos` due a single other body */ __device__ glm::vec3 gravitationalAccelerationHelper(glm::vec3 this_planet, glm::vec3 other_body, float other_body_mass) { // Check our distance squared calculation to protect against dividing by too small a number float distance_squared = glm::pow(glm::distance(this_planet, other_body), 2); if (glm::abs(distance_squared) < EPSILON) { distance_squared = EPSILON; } glm::vec3 unit_direction = glm::normalize(other_body - this_planet); float g = (G * other_body_mass) / distance_squared; return g * unit_direction; } /** * Compute the acceleration on a body at `my_pos` due to the `N` bodies in the array `other_planets`. */ __device__ glm::vec3 accelerate(int N, int iSelf, glm::vec3 this_planet, const glm::vec3 *other_planets) { // TODO: Compute the acceleration on `my_pos` due to: // * The star at the origin (with mass `starMass`) // * All of the *other* planets (with mass `planetMass`) // Return the sum of all of these contributions. // HINT: You may want to write a helper function that will compute the acceleration at // a single point due to a single other mass. Be careful that you protect against // division by very small numbers. 
// HINT: Use Newtonian gravitational acceleration: // G M // g = ----- // r^2 // where: // * G is the universal gravitational constant (already defined for you) // * M is the mass of the other object // * r is the distance between this object and the other object // * Also need to multiply the result by the unit vector of r from the other object to this planet // * And should this be negative? Because this is an attractive force glm::vec3 totalAcceleration = glm::vec3(0.0f); // Calculate acceleration relative to the start totalAcceleration += gravitationalAccelerationHelper(this_planet, glm::vec3(0.0f), starMass); // Calculate acceleration relative to the other planets for (int i = 0; i < N; i++) { if (i != iSelf) { totalAcceleration += gravitationalAccelerationHelper(this_planet, other_planets[i], planetMass); } } return totalAcceleration; } /** * For each of the `N` bodies, update its acceleration. * Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`. */ __global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) { // TODO: implement updateAccArray. // This function body runs once on each CUDA thread. // To avoid race conditions, each instance should only write ONE value to `acc`! int index = (blockIdx.x * blockDim.x) + threadIdx.x; acc[index] = accelerate(N, index, pos[index], pos); } /** * For each of the `N` bodies, update its velocity, then update its position, using a * simple Euler integration scheme. Acceleration must be updated before calling this kernel. */ __global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) { // TODO: implement updateVelocityPosition int index = (blockIdx.x * blockDim.x) + threadIdx.x; vel[index] = vel[index] + acc[index] * dt; pos[index] = pos[index] + vel[index] * dt; } /** * Step the entire N-body simulation by `dt` seconds. */ void Nbody::stepSimulation(float dt) { // TODO: Using the CUDA kernels you wrote above, write a function that // calls the kernels to perform a full simulation step. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // Update acceleration hipLaunchKernelGGL(( kernUpdateAcc), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_acc); // Update velocity and position hipLaunchKernelGGL(( kernUpdateVelPos), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, numObjects, dt, dev_pos, dev_vel, dev_acc); }
0985e0beffc78838b997ffb84c1e80a059c6cd83.cu
#define GLM_FORCE_CUDA #include <stdio.h> #include <cuda.h> #include <cmath> #include <glm/glm.hpp> #include "utilityCore.hpp" #include "kernel.h" #define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) /** * Check for CUDA errors; print and exit if there was a problem. */ void checkCUDAError(const char *msg, int line = -1) { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) { if (line >= 0) { fprintf(stderr, "Line %d: ", line); } fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } } /***************** * Configuration * *****************/ /*! Block size used for CUDA kernel launch. */ #define blockSize 128 /*! Mass of one "planet." */ #define planetMass 3e8f /*! Mass of the "star" at the center. */ #define starMass 5e10f /*! Size of the starting area in simulation space. */ const float scene_scale = 1e2; /*********************************************** * Kernel state (pointers are device pointers) * ***********************************************/ int numObjects; dim3 threadsPerBlock(blockSize); glm::vec3 *dev_pos; glm::vec3 *dev_vel; glm::vec3 *dev_acc; /****************** * initSimulation * ******************/ __host__ __device__ unsigned int hash(unsigned int a) { a = (a + 0x7ed55d16) + (a << 12); a = (a ^ 0xc761c23c) ^ (a >> 19); a = (a + 0x165667b1) + (a << 5); a = (a + 0xd3a2646c) ^ (a << 9); a = (a + 0xfd7046c5) + (a << 3); a = (a ^ 0xb55a4f09) ^ (a >> 16); return a; } /** * Function for generating a random vec3. */ __host__ __device__ glm::vec3 generateRandomVec3(float time, int index) { thrust::default_random_engine rng(hash((int)(index * time))); thrust::uniform_real_distribution<float> unitDistrib(-1, 1); return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng)); } /** * CUDA kernel for generating planets with a specified mass randomly around the star. */ __global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale, float mass) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 rand = generateRandomVec3(time, index); arr[index].x = scale * rand.x; arr[index].y = scale * rand.y; arr[index].z = 0.1 * scale * sqrt(rand.x * rand.x + rand.y * rand.y) * rand.z; } } /** * CUDA kernel for generating velocities in a vortex around the origin. * This is just to make for an interesting-looking scene. 
*/ __global__ void kernGenerateCircularVelArray(int time, int N, glm::vec3 * arr, glm::vec3 * pos) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N) { glm::vec3 R = glm::vec3(pos[index].x, pos[index].y, pos[index].z); float r = glm::length(R) + EPSILON; float s = sqrt(G * starMass / r); glm::vec3 D = glm::normalize(glm::cross(R / r, glm::vec3(0, 0, 1))); arr[index].x = s * D.x; arr[index].y = s * D.y; arr[index].z = s * D.z; } } /** * Initialize memory, update some globals */ void Nbody::initSimulation(int N) { numObjects = N; dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize); cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_pos failed!"); cudaMalloc((void**)&dev_vel, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_vel failed!"); cudaMalloc((void**)&dev_acc, N * sizeof(glm::vec3)); checkCUDAErrorWithLine("cudaMalloc dev_acc failed!"); kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects, dev_pos, scene_scale, planetMass); checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!"); kernGenerateCircularVelArray<<<fullBlocksPerGrid, blockSize>>>(2, numObjects, dev_vel, dev_pos); checkCUDAErrorWithLine("kernGenerateCircularVelArray failed!"); cudaThreadSynchronize(); } /****************** * copyPlanetsToVBO * ******************/ /** * Copy the planet positions into the VBO so that they can be drawn by OpenGL. */ __global__ void kernCopyPlanetsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) { int index = threadIdx.x + (blockIdx.x * blockDim.x); float c_scale = -1.0f / s_scale; if (index < N) { vbo[4 * index + 0] = pos[index].x * c_scale; vbo[4 * index + 1] = pos[index].y * c_scale; vbo[4 * index + 2] = pos[index].z * c_scale; vbo[4 * index + 3] = 1; } } /** * Wrapper for call to the kernCopyPlanetsToVBO CUDA kernel. */ void Nbody::copyPlanetsToVBO(float *vbodptr) { dim3 fullBlocksPerGrid((int)ceil(float(numObjects) / float(blockSize))); kernCopyPlanetsToVBO<<<fullBlocksPerGrid, blockSize>>>(numObjects, dev_pos, vbodptr, scene_scale); checkCUDAErrorWithLine("copyPlanetsToVBO failed!"); cudaThreadSynchronize(); } /****************** * stepSimulation * ******************/ /** * Compute the acceleration on a body at `my_pos` due a single other body */ __device__ glm::vec3 gravitationalAccelerationHelper(glm::vec3 this_planet, glm::vec3 other_body, float other_body_mass) { // Check our distance squared calculation to protect against dividing by too small a number float distance_squared = glm::pow(glm::distance(this_planet, other_body), 2); if (glm::abs(distance_squared) < EPSILON) { distance_squared = EPSILON; } glm::vec3 unit_direction = glm::normalize(other_body - this_planet); float g = (G * other_body_mass) / distance_squared; return g * unit_direction; } /** * Compute the acceleration on a body at `my_pos` due to the `N` bodies in the array `other_planets`. */ __device__ glm::vec3 accelerate(int N, int iSelf, glm::vec3 this_planet, const glm::vec3 *other_planets) { // TODO: Compute the acceleration on `my_pos` due to: // * The star at the origin (with mass `starMass`) // * All of the *other* planets (with mass `planetMass`) // Return the sum of all of these contributions. // HINT: You may want to write a helper function that will compute the acceleration at // a single point due to a single other mass. Be careful that you protect against // division by very small numbers. 
// HINT: Use Newtonian gravitational acceleration: // G M // g = ----- // r^2 // where: // * G is the universal gravitational constant (already defined for you) // * M is the mass of the other object // * r is the distance between this object and the other object // * Also need to multiply the result by the unit vector of r from the other object to this planet // * And should this be negative? Because this is an attractive force glm::vec3 totalAcceleration = glm::vec3(0.0f); // Calculate acceleration relative to the start totalAcceleration += gravitationalAccelerationHelper(this_planet, glm::vec3(0.0f), starMass); // Calculate acceleration relative to the other planets for (int i = 0; i < N; i++) { if (i != iSelf) { totalAcceleration += gravitationalAccelerationHelper(this_planet, other_planets[i], planetMass); } } return totalAcceleration; } /** * For each of the `N` bodies, update its acceleration. * Compute the total instantaneous acceleration using `accelerate`, then store that into `acc`. */ __global__ void kernUpdateAcc(int N, float dt, const glm::vec3 *pos, glm::vec3 *acc) { // TODO: implement updateAccArray. // This function body runs once on each CUDA thread. // To avoid race conditions, each instance should only write ONE value to `acc`! int index = (blockIdx.x * blockDim.x) + threadIdx.x; acc[index] = accelerate(N, index, pos[index], pos); } /** * For each of the `N` bodies, update its velocity, then update its position, using a * simple Euler integration scheme. Acceleration must be updated before calling this kernel. */ __global__ void kernUpdateVelPos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel, const glm::vec3 *acc) { // TODO: implement updateVelocityPosition int index = (blockIdx.x * blockDim.x) + threadIdx.x; vel[index] = vel[index] + acc[index] * dt; pos[index] = pos[index] + vel[index] * dt; } /** * Step the entire N-body simulation by `dt` seconds. */ void Nbody::stepSimulation(float dt) { // TODO: Using the CUDA kernels you wrote above, write a function that // calls the kernels to perform a full simulation step. dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize); // Update acceleration kernUpdateAcc<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_acc); // Update velocity and position kernUpdateVelPos<<<fullBlocksPerGrid, blockSize>>>(numObjects, dt, dev_pos, dev_vel, dev_acc); }
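Editorial aside, not part of the paired files above: the helper in the N-body files applies Newtonian gravity, a = G*M/r^2, directed toward the other body, with r^2 clamped by a small epsilon. A plain-C++ sketch without glm; the value of G here is only a stand-in (the project defines its own constant, which is not shown in this excerpt).
#include <cmath>
#include <cstdio>

struct Vec3 { float x, y, z; };

// Acceleration on `self` due to a body of mass `otherMass` at `other`.
Vec3 gravAccel(Vec3 self, Vec3 other, float otherMass, float G, float eps)
{
    float dx = other.x - self.x, dy = other.y - self.y, dz = other.z - self.z;
    float r2 = dx * dx + dy * dy + dz * dz;
    if (r2 < eps) r2 = eps;                   // protect against dividing by a tiny distance
    float invR = 1.0f / std::sqrt(r2);
    float a = G * otherMass / r2;             // magnitude: G M / r^2
    return Vec3{ a * dx * invR, a * dy * invR, a * dz * invR };
}

int main()
{
    // A body 100 units from a 5e10-mass "star" at the origin (mirrors starMass above).
    Vec3 a = gravAccel(Vec3{100, 0, 0}, Vec3{0, 0, 0}, 5e10f, 6.674e-11f, 1e-5f);
    printf("%g %g %g\n", a.x, a.y, a.z);      // points back toward the origin
    return 0;
}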
2699f988e1a815c5316e0f0cc1f4321b19d04d6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "DataVolumeCUDA.h" #include "float_utils.h" #include "uint_utils.h" #include "ThresholdingKernels.cu" #include <stdio.h> #include <stdlib.h> template< class TYPEd, class UINTd, class FLOATd > void binary_threshold( DataVolumeCUDA<TYPEd, UINTd, FLOATd> *image, TYPEd threshold, TYPEd low_val, TYPEd high_val ) { // Setup dimensions of grid/blocks. dim3 blockDim(512,1,1); dim3 gridDim((unsigned int) ceil((double)(prod(image->dims)/blockDim.x)), 1, 1 ); // Invoke kernel hipLaunchKernelGGL(( binary_threshold_kernel), dim3(gridDim), dim3(blockDim) , 0, 0, image->data, image->dims, threshold, low_val, high_val ); hipError_t err = hipGetLastError(); if( err != hipSuccess ){ printf("\nCuda error detected in 'binary_threshold': %s. Quitting.\n", hipGetErrorString(err) ); fflush(stdout); exit(1); } } template void binary_threshold( DataVolumeCUDA<float, uint3, float3> *image, float threshold, float low_val, float high_val );
2699f988e1a815c5316e0f0cc1f4321b19d04d6f.cu
#include "DataVolumeCUDA.h" #include "float_utils.h" #include "uint_utils.h" #include "ThresholdingKernels.cu" #include <stdio.h> #include <stdlib.h> template< class TYPEd, class UINTd, class FLOATd > void binary_threshold( DataVolumeCUDA<TYPEd, UINTd, FLOATd> *image, TYPEd threshold, TYPEd low_val, TYPEd high_val ) { // Setup dimensions of grid/blocks. dim3 blockDim(512,1,1); dim3 gridDim((unsigned int) ceil((double)(prod(image->dims)/blockDim.x)), 1, 1 ); // Invoke kernel binary_threshold_kernel<<< gridDim, blockDim >>>( image->data, image->dims, threshold, low_val, high_val ); cudaError_t err = cudaGetLastError(); if( err != cudaSuccess ){ printf("\nCuda error detected in 'binary_threshold': %s. Quitting.\n", cudaGetErrorString(err) ); fflush(stdout); exit(1); } } template void binary_threshold( DataVolumeCUDA<float, uint3, float3> *image, float threshold, float low_val, float high_val );
bb0fa809035c0b0b0beeb6c9a92984feaa3a1299.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingBilinear.cu" #else #include "../linear_upsampling.h" static inline void THNN_(SpatialUpSamplingBilinear_shapeCheck) (THCState *state, THCTensor *input, THCTensor *gradOutput, int nBatch, int nChannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) { THArgCheck(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0, 2, "input and output sizes should be greater than 0," " but got input (H: %d, W: %d) output (H: %d, W: %d)", inputHeight, inputWidth, outputHeight, outputWidth); if (input != NULL) { THCUNN_argCheck(state, !input->is_empty() && input->dim() == 4, 2, input, "non-empty 4D input tensor expected but got: %s"); } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } void THNN_(SpatialUpSamplingBilinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int outputHeight, int outputWidth, bool align_corners) { int nbatch = THCTensor_(size)(state, input, 0); int channels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, input, NULL, nbatch, channels, inputHeight, inputWidth, outputHeight, outputWidth); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); THCTensor_(zero)(state, output); THCDeviceTensor<scalar_t, 4> idata = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> odata = toDeviceTensor<scalar_t, 4>(state, output); THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; hipStream_t stream = THCState_getCurrentStream(state); hipLaunchKernelGGL(( caffe_gpu_interp2_kernel<scalar_t, accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads) , 0 , stream, num_kernels, rheight, rwidth, align_corners, idata, odata); THCudaCheck(hipGetLastError()); } void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THCState *state, THCTensor *gradOutput, THCTensor *gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth, bool align_corners) { THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, NULL, gradOutput, nbatch, nchannels, inputHeight, inputWidth, outputHeight, outputWidth); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); THCDeviceTensor<scalar_t, 4> data1 = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> data2 = toDeviceTensor<scalar_t, 4>(state, gradOutput); const accreal rheight = 
linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; hipStream_t stream = THCState_getCurrentStream(state); hipLaunchKernelGGL(( caffe_gpu_interp2_kernel_backward<scalar_t ,accreal>) , dim3(THCCeilDiv(num_kernels, num_threads)), dim3(num_threads), 0, stream, num_kernels, rheight, rwidth, align_corners, data1, data2); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
bb0fa809035c0b0b0beeb6c9a92984feaa3a1299.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialUpSamplingBilinear.cu" #else #include "../linear_upsampling.h" static inline void THNN_(SpatialUpSamplingBilinear_shapeCheck) (THCState *state, THCTensor *input, THCTensor *gradOutput, int nBatch, int nChannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth) { THArgCheck(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0, 2, "input and output sizes should be greater than 0," " but got input (H: %d, W: %d) output (H: %d, W: %d)", inputHeight, inputWidth, outputHeight, outputWidth); if (input != NULL) { THCUNN_argCheck(state, !input->is_empty() && input->dim() == 4, 2, input, "non-empty 4D input tensor expected but got: %s"); } if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, 4, 0, nBatch); THCUNN_check_dim_size(state, gradOutput, 4, 1, nChannels); THCUNN_check_dim_size(state, gradOutput, 4, 2, outputHeight); THCUNN_check_dim_size(state, gradOutput, 4, 3, outputWidth); } } void THNN_(SpatialUpSamplingBilinear_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int outputHeight, int outputWidth, bool align_corners) { int nbatch = THCTensor_(size)(state, input, 0); int channels = THCTensor_(size)(state, input, 1); int inputHeight = THCTensor_(size)(state, input, 2); int inputWidth = THCTensor_(size)(state, input, 3); THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, input, NULL, nbatch, channels, inputHeight, inputWidth, outputHeight, outputWidth); THCUNN_assertSameGPU(state, 2, input, output); THCTensor_(resize4d)(state, output, THCTensor_(size)(state, input, 0), THCTensor_(size)(state, input, 1), outputHeight, outputWidth); THCTensor_(zero)(state, output); THCDeviceTensor<scalar_t, 4> idata = toDeviceTensor<scalar_t, 4>(state, input); THCDeviceTensor<scalar_t, 4> odata = toDeviceTensor<scalar_t, 4>(state, output); THAssert(inputHeight > 0 && inputWidth > 0 && outputHeight > 0 && outputWidth > 0); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); caffe_gpu_interp2_kernel<scalar_t, accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads , 0 , stream>>>(num_kernels, rheight, rwidth, align_corners, idata, odata); THCudaCheck(cudaGetLastError()); } void THNN_(SpatialUpSamplingBilinear_updateGradInput)( THCState *state, THCTensor *gradOutput, THCTensor *gradInput, int nbatch, int nchannels, int inputHeight, int inputWidth, int outputHeight, int outputWidth, bool align_corners) { THNN_(SpatialUpSamplingBilinear_shapeCheck) (state, NULL, gradOutput, nbatch, nchannels, inputHeight, inputWidth, outputHeight, outputWidth); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCUNN_assertSameGPU(state, 2, gradOutput, gradInput); THCTensor_(resize4d)(state, gradInput, nbatch, nchannels, inputHeight, inputWidth); THCTensor_(zero)(state, gradInput); THCDeviceTensor<scalar_t, 4> data1 = toDeviceTensor<scalar_t, 4>(state, gradInput); THCDeviceTensor<scalar_t, 4> data2 = toDeviceTensor<scalar_t, 4>(state, gradOutput); const accreal rheight = linear_upsampling_compute_scale<accreal>(inputHeight, outputHeight, align_corners); const accreal rwidth = linear_upsampling_compute_scale<accreal>(inputWidth, 
outputWidth, align_corners); const int num_kernels = outputHeight * outputWidth; const int num_threads = THCState_getCurrentDeviceProperties(state)->maxThreadsPerBlock; cudaStream_t stream = THCState_getCurrentStream(state); caffe_gpu_interp2_kernel_backward<scalar_t ,accreal> <<<THCCeilDiv(num_kernels, num_threads), num_threads, 0, stream>>>(num_kernels, rheight, rwidth, align_corners, data1, data2); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); } #endif
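Editorial aside, not part of the paired files above: rheight and rwidth come from linear_upsampling_compute_scale in ../linear_upsampling.h, which is not included in this excerpt. The sketch below shows the definition such a helper typically has (align_corners maps the endpoints of input and output onto each other); treat it as an assumption about the usual convention, not a copy of the project's code.
#include <cstdio>

double upsamplingScale(int inputSize, int outputSize, bool alignCorners)
{
    if (outputSize <= 1)
        return 0.0;                                    // degenerate output: a single sample
    return alignCorners
        ? (double)(inputSize - 1) / (outputSize - 1)   // endpoints coincide
        : (double)inputSize / outputSize;              // plain size ratio
}

int main()
{
    printf("%f\n", upsamplingScale(4, 8, true));   // 0.428571...
    printf("%f\n", upsamplingScale(4, 8, false));  // 0.500000
    return 0;
}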
b7879a692587e07c9c91de750d321e4956b91f24.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int dims_tea_leaf_ppcg_init2_kernel [5][1]; static int dims_tea_leaf_ppcg_init2_kernel_h [5][1] = {0}; //user function __device__ void tea_leaf_ppcg_init2_kernel_gpu(ACC<double> &sd, ACC<double> &rtemp, ACC<double> &utemp, const ACC<double> &r, const double *theta_r) { sd(0,0) = r(0,0)*(*theta_r); rtemp(0,0) = r(0,0); utemp(0,0) = sd(0,0); } __global__ void ops_tea_leaf_ppcg_init2_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, const double arg4, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[1][0]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[2][0]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[3][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_ppcg_init2_kernel[0][0], arg0); ACC<double> argp1(dims_tea_leaf_ppcg_init2_kernel[1][0], arg1); ACC<double> argp2(dims_tea_leaf_ppcg_init2_kernel[2][0], arg2); const ACC<double> argp3(dims_tea_leaf_ppcg_init2_kernel[3][0], arg3); tea_leaf_ppcg_init2_kernel_gpu(argp0, argp1, argp2, argp3, &arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_init2_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_tea_leaf_ppcg_init2_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,44)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(44,"tea_leaf_ppcg_init2_kernel"); OPS_kernels[44].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; if (xdim0 != dims_tea_leaf_ppcg_init2_kernel_h[0][0] || xdim1 != dims_tea_leaf_ppcg_init2_kernel_h[1][0] || xdim2 != dims_tea_leaf_ppcg_init2_kernel_h[2][0] || xdim3 != dims_tea_leaf_ppcg_init2_kernel_h[3][0]) { dims_tea_leaf_ppcg_init2_kernel_h[0][0] = xdim0; dims_tea_leaf_ppcg_init2_kernel_h[1][0] = xdim1; dims_tea_leaf_ppcg_init2_kernel_h[2][0] = xdim2; dims_tea_leaf_ppcg_init2_kernel_h[3][0] = xdim3; cutilSafeCall(hipMemcpyToSymbol( dims_tea_leaf_ppcg_init2_kernel, dims_tea_leaf_ppcg_init2_kernel_h, sizeof(dims_tea_leaf_ppcg_init2_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 
tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[44].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) hipLaunchKernelGGL(( ops_tea_leaf_ppcg_init2_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], *(double *)arg4.data,x_size, y_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[44].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[44].mpi_time += t2-t1; OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_init2_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 44; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 44; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; char *tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, 
arg4.data,1*sizeof(double)); desc->args[4].data = tmp; desc->function = ops_par_loop_tea_leaf_ppcg_init2_kernel_execute; if (OPS_diags > 1) { ops_timing_realloc(44,"tea_leaf_ppcg_init2_kernel"); } ops_enqueue_kernel(desc); } #endif
b7879a692587e07c9c91de750d321e4956b91f24.cu
// // auto-generated by ops.py // __constant__ int dims_tea_leaf_ppcg_init2_kernel [5][1]; static int dims_tea_leaf_ppcg_init2_kernel_h [5][1] = {0}; //user function __device__ void tea_leaf_ppcg_init2_kernel_gpu(ACC<double> &sd, ACC<double> &rtemp, ACC<double> &utemp, const ACC<double> &r, const double *theta_r) { sd(0,0) = r(0,0)*(*theta_r); rtemp(0,0) = r(0,0); utemp(0,0) = sd(0,0); } __global__ void ops_tea_leaf_ppcg_init2_kernel( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, const double arg4, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[0][0]; arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[1][0]; arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[2][0]; arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_tea_leaf_ppcg_init2_kernel[3][0]; if (idx_x < size0 && idx_y < size1) { ACC<double> argp0(dims_tea_leaf_ppcg_init2_kernel[0][0], arg0); ACC<double> argp1(dims_tea_leaf_ppcg_init2_kernel[1][0], arg1); ACC<double> argp2(dims_tea_leaf_ppcg_init2_kernel[2][0], arg2); const ACC<double> argp3(dims_tea_leaf_ppcg_init2_kernel[3][0], arg3); tea_leaf_ppcg_init2_kernel_gpu(argp0, argp1, argp2, argp3, &arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_init2_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_tea_leaf_ppcg_init2_kernel_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; #if OPS_MPI ops_block block = desc->block; #endif int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,44)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(44,"tea_leaf_ppcg_init2_kernel"); OPS_kernels[44].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; #endif //OPS_MPI #ifdef OPS_MPI int arg_idx[2]; #endif #ifdef OPS_MPI if (compute_ranges(args, 5,block, range, start, end, arg_idx) < 0) return; #else //OPS_MPI for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; if (xdim0 != dims_tea_leaf_ppcg_init2_kernel_h[0][0] || xdim1 != dims_tea_leaf_ppcg_init2_kernel_h[1][0] || xdim2 != dims_tea_leaf_ppcg_init2_kernel_h[2][0] || xdim3 != dims_tea_leaf_ppcg_init2_kernel_h[3][0]) { dims_tea_leaf_ppcg_init2_kernel_h[0][0] = xdim0; dims_tea_leaf_ppcg_init2_kernel_h[1][0] = xdim1; dims_tea_leaf_ppcg_init2_kernel_h[2][0] = xdim2; dims_tea_leaf_ppcg_init2_kernel_h[3][0] = xdim3; cutilSafeCall(cudaMemcpyToSymbol( dims_tea_leaf_ppcg_init2_kernel, dims_tea_leaf_ppcg_init2_kernel_h, sizeof(dims_tea_leaf_ppcg_init2_kernel))); } int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? 
args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[44].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0) ops_tea_leaf_ppcg_init2_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], *(double *)arg4.data,x_size, y_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[44].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[44].mpi_time += t2-t1; OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[44].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_tea_leaf_ppcg_init2_kernel(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 44; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 44; for ( int i=0; i<4; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; char *tmp = (char*)malloc(1*sizeof(double)); memcpy(tmp, arg4.data,1*sizeof(double)); desc->args[4].data = tmp; desc->function = ops_par_loop_tea_leaf_ppcg_init2_kernel_execute; if 
(OPS_diags > 1) { ops_timing_realloc(44,"tea_leaf_ppcg_init2_kernel"); } ops_enqueue_kernel(desc); } #endif
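Note on the generated code above: the host stub folds the 2D thread index into each data pointer (arg += idx_x + idx_y * xdim) and wraps the result in an ACC<double> accessor, so the user kernel can address every field relative to (0,0). Below is a minimal hand-written sketch of the same update, assuming plain row-major fields with leading dimension xdim and none of OPS's base-offset or halo handling; it is an illustration of what the wrapper computes, not a drop-in replacement.

// Hand-written sketch of the ppcg_init2 update over a 2D field, assuming a
// simple row-major layout with leading dimension `xdim` (no OPS base offsets).
__global__ void ppcg_init2_plain(double *sd, double *rtemp, double *utemp,
                                 const double *r, double theta_r,
                                 int size0, int size1, int xdim) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < size0 && y < size1) {
    int i = y * xdim + x;        // the offset the generated code folds into the arg pointers
    sd[i]    = r[i] * theta_r;   // sd(0,0)    = r(0,0) * (*theta_r)
    rtemp[i] = r[i];             // rtemp(0,0) = r(0,0)
    utemp[i] = sd[i];            // utemp(0,0) = sd(0,0)
  }
}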
628fe91ed580d26397b7f061f4a70d9c19ae9b6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" using namespace std; #include <stdio.h> #include <time.h> /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct vArray { F* _; size_t n; vArray( F* _, size_t n ) : _( _ ) , n( n ) { } __host__ __device__ F& operator[]( size_t I ) const { return _[ I ]; } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct Array : vArray< F > { ~ Array() { hipFree( vArray< F >::_ ); } static F* Malloc( size_t N ) { F* _; hipMallocManaged( &_, N * sizeof( F ) ); return _; } Array( size_t n ) : vArray< F >( Malloc( n ), n ) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct vMatrix { F* _; size_t h; size_t w; size_t v; vMatrix( F* _, size_t h, size_t w, size_t v ) : _( _ ) , h( h ) , w( w ) , v( v ) { } __host__ __device__ F& operator()( size_t Y, size_t X ) const { return _[ Y * v + X ]; } vArray< F > operator[]( size_t I ) const { return vArray< F >( _ + I * v , w ); } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <iostream> template < typename F > ostream& operator <<( ostream& S, const vMatrix< F >& P ) { for ( size_t y = 0; y < P.h; y++ ) { for ( size_t x = 0; x < P.w; x++ ) S << " " << P( y, x ); S << endl; } return S; } template < typename F > struct Matrix : vMatrix< F > { ~ Matrix() { hipFree( vMatrix< F >::_ ); } static F* Malloc( size_t N ) { F* _; hipMallocManaged( &_, N * sizeof( F ) ); return _; } Matrix( size_t h, size_t w ) : vMatrix< F >( Malloc( h * w ), h, w, w ) { } Matrix( const vMatrix< F >& _ ) : vMatrix< F >( Malloc( _.h * _.w ), _.h, _.w, _.w ) { for ( size_t y = 0; y < _.h; y++ ) for ( size_t x = 0; x < _.w; x++ ) (*this)( y, x ) = _( y, x ); } Matrix( const Matrix< F >& _ ) : Matrix< F >( (vMatrix< F >)_ ) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define UNITS( p, q ) ( ( p + q - 1 ) / q ) #define B_S 256 inline dim3 grid1D( size_t N ) { return dim3( UNITS( N, B_S ) ); } inline dim3 thread1D() { return dim3( B_S ); } #define B_S_H 32 #define B_S_W 32 inline dim3 grid2D( size_t H, size_t W ) { return dim3( UNITS( W, B_S_W ), UNITS( H, B_S_H ) ); } inline dim3 thread2D() { return dim3( B_S_W, B_S_H ); } #include <vector> template < typename F, int Y, int X > Matrix< F > MakeMatrix( initializer_list< F > args ) { vector< F > argsV = args; Matrix< F > _( Y, X ); for ( size_t y = 0; y < Y; y++ ) { for ( size_t x = 0; x < X; x++ ) { _( y, x ) = argsV[ y * X + x ]; } } return _; } // Sigmoid function /////////////////////////////////////////////////////////////////////////////////////////////////////// // Definition of sigmoid function template < typename F > __global__ void SIGMOID( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = 1 / ( 1 + exp( - P( y, x ) ) ); } template < typename F > Matrix< F > sigmoid( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( SIGMOID), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, P ); 
hipDeviceSynchronize(); return v; } // Annotation / function test template < typename F > void sigmoid_txt() { cout << "test: sigmoid function" << endl; cout << "input matrix:" << endl; cout << MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } ) << endl; cout << "output matrix:" << endl; cout << sigmoid( MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } )); } // ReLU /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Defnition of ReLU (Rectified Linear Unit) function template < typename F > __global__ void RELU( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = max( F( 0 ), P( y, x ) ); } template < typename F > Matrix< F > ReLU( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( RELU), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, P ); hipDeviceSynchronize(); return v; } // Annotation / fuction test template < typename F > void ReLU_txt() { cout << " test: ReLU function" << endl; cout << "input matrix:" << endl; cout << MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } ) << endl; cout << "output matrix:" << endl; cout << ReLU( MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } )); } // Dot operation ////////////////////////////////////////////////////////////////////////////////////////////////////////// // Definition of dot operation template < typename F > __global__ void DOT( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R, size_t WH ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; auto lp = L._ + y * L.v; auto rp = R._ + x; F w = 0; for ( size_t _ = 0; _ < WH; _++ ) w += lp[ _ ] * rp[ _ * R.v ]; V( y, x ) = w; } template < typename F > Matrix< F > operator *( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); hipLaunchKernelGGL(( DOT), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R, L.w ); hipDeviceSynchronize(); return v; } // Annotation / fuction test template < typename F > void dot_operator_txt() { cout << " test: dot operation" << endl; auto l = MakeMatrix< F, 2, 3 >( { 1, 2, 3, 4, 5, 6 } ); auto r = MakeMatrix< F, 3, 2 >( { 1, 2, 3, 4, 5, 6 } ); cout << l * r; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <map> template < typename F > map< string, Matrix< F > > init_network() { map< string, Matrix< F > > _; _.emplace( "W1", MakeMatrix< F, 2, 3 >( { 0.1, 0.3, 0.5, 0.2, 0.4, 0.6 } ) ); _.emplace( "b1", MakeMatrix< F, 1, 3 >( { 0.1, 0.2, 0.3 } ) ); _.emplace( "W2", MakeMatrix< F, 3, 2 >( { 0.1, 0.4, 0.2, 0.5, 0.3, 0.6 } ) ); _.emplace( "b2", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); _.emplace( "W3", MakeMatrix< F, 2, 2 >( { 0.1, 0.3, 0.2, 0.4 } ) ); _.emplace( "b3", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); return _; } // Identify function template < typename F > Matrix< F > identify_function( const Matrix< F >& _ ) { return _; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R( y, x ); } template < typename F > Matrix< F > 
operator +( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); hipLaunchKernelGGL(( ADD), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } // Forward propagation template < typename F > Matrix< F > forward( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { auto W1 = network.at( "W1" ); auto W2 = network.at( "W2" ); auto W3 = network.at( "W3" ); auto b1 = network.at( "b1" ); auto b2 = network.at( "b2" ); auto b3 = network.at( "b3" ); auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = identify_function( a3 ); return y; } // Annotation / fuction test template < typename F > void network_txt() { cout << "Initialize network with weights" << endl; auto network = init_network< F >(); cout << "test network" << endl; auto x = MakeMatrix< F, 1, 2 >( { 1.0, 0.5 } ); auto y = forward( network, x ); cout << y; } // Softmax (primitive) ///////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > __global__ void EXP( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = exp( P( y, x ) ); } template < typename F > Matrix< F > exp( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); hipLaunchKernelGGL(( EXP), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, P ); hipDeviceSynchronize(); return v; } template < typename F > F sum( const vMatrix< F >& P ) { F _ = 0; for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) _ += P( y, x ); return _; } template < typename F > __global__ void DIV_INP( vMatrix< F > V, F P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) /= P; } template < typename F > void operator /=( const vMatrix< F >& L, F R ) { hipLaunchKernelGGL(( DIV_INP), dim3(grid2D( L.h, L.w )), dim3(thread2D()) , 0, 0, L, R ); hipDeviceSynchronize(); } template < typename F > Matrix< F > softmax_primitive( const vMatrix< F >& p ) { auto v = exp( p ); v /= sum( v ); return v; } template < typename F > void softmax_primitive_txt() { cout << "softmax_primitive" << endl; cout << softmax_primitive( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ); } // Softmax///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > F max( const vMatrix< F >& P ) { F _ = P( 0, 0 ); for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) if ( P( y, x ) > _ ) _ = P( y, x ); return _; } template < typename F > __global__ void SUB_C( vMatrix< F > V, vMatrix< F > L, F R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) - R; } template < typename F > Matrix< F > operator -( const vMatrix< F >& L, F R ) { Matrix< F > v( L.h, L.w ); hipLaunchKernelGGL(( SUB_C), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } template < typename F > Matrix< F > softmax( const vMatrix< F >& p ) { auto v = exp( p - max( p ) ); v /= sum( v ); return v; } template < typename F > void softmax_txt() { cout << "softmax" << endl; cout << softmax( MakeMatrix< F, 
1, 3 >( { 1010, 1000, 990 } ) ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > void sum_softmax() { cout << "sum( softmax )" << endl; cout << sum( softmax( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ) ) << endl; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <fstream> template < typename F > map< string, Matrix< F > > get_data() { map< string, Matrix< F > > v; { ifstream ifs( "../mnist_data/train-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/train-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 60000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_train", w ); } { ifstream ifs( "../mnist_data/train-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/train-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 60000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_train", w ); } { ifstream ifs( "../mnist_data/t10k-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/t10k-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 10000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_test", w ); } { ifstream ifs( "../mnist_data/t10k-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_datat10k-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 10000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_test", w ); } return v; } map< string, Matrix< double > > init_network() { map< string, Matrix< double > > v; ifstream ifs( "../mnist_data/sample_weight.bin" ); if ( ! 
ifs.is_open() ) throw "../mnist_data/sample_weight.bin"; { Matrix< double > w( 784, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W1", w ); } { Matrix< double > w( 50, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W2", w ); } { Matrix< double > w( 100, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W3", w ); } { Matrix< double > w( 1, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b1", w ); } { Matrix< double > w( 1, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b2", w ); } { Matrix< double > w( 1, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b3", w ); } return v; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vArray< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R[ x ]; } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vArray< F >& R ) { Matrix< F > v( L.h, L.w ); hipLaunchKernelGGL(( ADD), dim3(grid2D( v.h, v.w )), dim3(thread2D()) , 0, 0, v, L, R ); hipDeviceSynchronize(); return v; } template < typename F > Matrix< F > predict( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { Matrix< F >& W1 = network.at( "W1" ); Matrix< F >& W2 = network.at( "W2" ); Matrix< F >& W3 = network.at( "W3" ); auto b1 = network.at( "b1" )[ 0 ]; auto b2 = network.at( "b2" )[ 0 ]; auto b3 = network.at( "b3" )[ 0 ]; auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = softmax( a3 ); return y; } template < typename F > F argmax( const vArray< F >& P ) { size_t _ = 0; for ( size_t i = 1; i < P.n; i++ ) if ( P[ i ] > P[ _ ] ) _ = i; return F( _ ); } template < typename F > Array< F > argmax( const vMatrix< F >& P ) { Array< F > _( P.h ); for ( size_t y = 0; y < P.h; y++ ) _[ y ] = argmax( P[ y ] ); return _; } template < typename F > vArray< F > Part( const vArray< F >& _, size_t O, size_t N ) { return vArray< F >( _._ + O , N ); } template < typename F > vMatrix< F > Part( const vMatrix< F >& _, size_t Y, size_t X, size_t H, size_t W ) { return vMatrix< F >( _._ + Y * _.v + X , H , W , _.v ); } void mnist() { cout << "MNIST" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i++ ) { auto y = predict( network, Part( x_test, i, 0, 1, x_test.w ) ); auto p = argmax( y[ 0 ] ); if ( p == t_test[ i ] ) accuracy_cnt++; } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.3 template < typename F > size_t CountEquals( const vArray< F >& L, const vArray< F >& R ) { size_t _ = 0; for ( size_t i = 0; i < L.n; i++ ) if ( L[ i ] == R[ i ] ) _++; return _; } void mnist_batch() { cout << "MNIST BATCH" << endl; clock_t start_get = clock(); auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; clock_t end_get = clock(); const double time_get = static_cast<double>(end_get - start_get) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (import data) %lf[ms]\n", time_get); clock_t start_net = clock(); auto network = init_network(); clock_t end_net = clock(); const double time_net = 
static_cast<double>(end_net - start_net) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (network initialization) %lf[ms]\n", time_net); clock_t start_pre = clock(); auto accuracy_cnt = 0; int batch_size = 100; for ( size_t i = 0; i < x_test.h; i += batch_size ) { auto y = predict( network, Part( x_test, i, 0, 100, x_test.w ) ); auto p = argmax( y ); accuracy_cnt += CountEquals( p, Part( t_test, i, 100 ) ); } clock_t end_pre = clock(); const double time_pre = static_cast<double>(end_pre - start_pre) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (prediction) %lf[ms]\n", time_pre); cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// Main template < typename F > void Main() { sigmoid_txt< F >(); ReLU_txt< F >(); dot_operator_txt< F >(); network_txt< F >(); softmax_txt< F >(); softmax_primitive_txt< F >(); sum_softmax< F >(); mnist(); clock_t start = clock(); mnist_batch(); clock_t end = clock(); const double time = static_cast<double>(end - start) / CLOCKS_PER_SEC * 1000.0; printf("Total elapsed time %lf[ms]\n", time); } int main( int argc, char* argv[] ) { Main< double >(); }
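The .cu original of this file follows as the next entry; the systematic differences introduced by hipify are the kernel-launch syntax and the one-for-one renaming of runtime calls. A schematic of that mapping, using the sigmoid launch from this file as the example (grid2D, thread2D and SIGMOID are the helpers defined above):

// CUDA triple-chevron launch, as written in the .cu version:
//   SIGMOID<<< grid2D( v.h, v.w ), thread2D() >>>( v, P );
// hipify rewrites it to the macro form used above, making the shared-memory
// size (0) and stream (0) arguments explicit:
//   hipLaunchKernelGGL(( SIGMOID), dim3(grid2D( v.h, v.w )), dim3(thread2D()), 0, 0, v, P );
// Runtime calls are renamed one-for-one, e.g.
//   cudaMallocManaged     -> hipMallocManaged
//   cudaDeviceSynchronize -> hipDeviceSynchronize
//   cudaFree              -> hipFree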
628fe91ed580d26397b7f061f4a70d9c19ae9b6e.cu
using namespace std; #include <stdio.h> #include <time.h> /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct vArray { F* _; size_t n; vArray( F* _, size_t n ) : _( _ ) , n( n ) { } __host__ __device__ F& operator[]( size_t I ) const { return _[ I ]; } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct Array : vArray< F > { ~ Array() { cudaFree( vArray< F >::_ ); } static F* Malloc( size_t N ) { F* _; cudaMallocManaged( &_, N * sizeof( F ) ); return _; } Array( size_t n ) : vArray< F >( Malloc( n ), n ) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > struct vMatrix { F* _; size_t h; size_t w; size_t v; vMatrix( F* _, size_t h, size_t w, size_t v ) : _( _ ) , h( h ) , w( w ) , v( v ) { } __host__ __device__ F& operator()( size_t Y, size_t X ) const { return _[ Y * v + X ]; } vArray< F > operator[]( size_t I ) const { return vArray< F >( _ + I * v , w ); } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <iostream> template < typename F > ostream& operator <<( ostream& S, const vMatrix< F >& P ) { for ( size_t y = 0; y < P.h; y++ ) { for ( size_t x = 0; x < P.w; x++ ) S << " " << P( y, x ); S << endl; } return S; } template < typename F > struct Matrix : vMatrix< F > { ~ Matrix() { cudaFree( vMatrix< F >::_ ); } static F* Malloc( size_t N ) { F* _; cudaMallocManaged( &_, N * sizeof( F ) ); return _; } Matrix( size_t h, size_t w ) : vMatrix< F >( Malloc( h * w ), h, w, w ) { } Matrix( const vMatrix< F >& _ ) : vMatrix< F >( Malloc( _.h * _.w ), _.h, _.w, _.w ) { for ( size_t y = 0; y < _.h; y++ ) for ( size_t x = 0; x < _.w; x++ ) (*this)( y, x ) = _( y, x ); } Matrix( const Matrix< F >& _ ) : Matrix< F >( (vMatrix< F >)_ ) { } }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #define UNITS( p, q ) ( ( p + q - 1 ) / q ) #define B_S 256 inline dim3 grid1D( size_t N ) { return dim3( UNITS( N, B_S ) ); } inline dim3 thread1D() { return dim3( B_S ); } #define B_S_H 32 #define B_S_W 32 inline dim3 grid2D( size_t H, size_t W ) { return dim3( UNITS( W, B_S_W ), UNITS( H, B_S_H ) ); } inline dim3 thread2D() { return dim3( B_S_W, B_S_H ); } #include <vector> template < typename F, int Y, int X > Matrix< F > MakeMatrix( initializer_list< F > args ) { vector< F > argsV = args; Matrix< F > _( Y, X ); for ( size_t y = 0; y < Y; y++ ) { for ( size_t x = 0; x < X; x++ ) { _( y, x ) = argsV[ y * X + x ]; } } return _; } // Sigmoid function /////////////////////////////////////////////////////////////////////////////////////////////////////// // Definition of sigmoid function template < typename F > __global__ void SIGMOID( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = 1 / ( 1 + exp( - P( y, x ) ) ); } template < typename F > Matrix< F > sigmoid( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); SIGMOID<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } // Annotation / function test template < typename F > void sigmoid_txt() { cout << "test: 
sigmoid function" << endl; cout << "input matrix:" << endl; cout << MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } ) << endl; cout << "output matrix:" << endl; cout << sigmoid( MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } )); } // ReLU /////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // Defnition of ReLU (Rectified Linear Unit) function template < typename F > __global__ void RELU( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = max( F( 0 ), P( y, x ) ); } template < typename F > Matrix< F > ReLU( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); RELU<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } // Annotation / fuction test template < typename F > void ReLU_txt() { cout << " test: ReLU function" << endl; cout << "input matrix:" << endl; cout << MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } ) << endl; cout << "output matrix:" << endl; cout << ReLU( MakeMatrix<F, 2 , 5>( { -1, -0.5, 0, 0.5, 1, 1, 2, 3, 4, 5 } )); } // Dot operation ////////////////////////////////////////////////////////////////////////////////////////////////////////// // Definition of dot operation template < typename F > __global__ void DOT( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R, size_t WH ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; auto lp = L._ + y * L.v; auto rp = R._ + x; F w = 0; for ( size_t _ = 0; _ < WH; _++ ) w += lp[ _ ] * rp[ _ * R.v ]; V( y, x ) = w; } template < typename F > Matrix< F > operator *( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); DOT<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R, L.w ); cudaDeviceSynchronize(); return v; } // Annotation / fuction test template < typename F > void dot_operator_txt() { cout << " test: dot operation" << endl; auto l = MakeMatrix< F, 2, 3 >( { 1, 2, 3, 4, 5, 6 } ); auto r = MakeMatrix< F, 3, 2 >( { 1, 2, 3, 4, 5, 6 } ); cout << l * r; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <map> template < typename F > map< string, Matrix< F > > init_network() { map< string, Matrix< F > > _; _.emplace( "W1", MakeMatrix< F, 2, 3 >( { 0.1, 0.3, 0.5, 0.2, 0.4, 0.6 } ) ); _.emplace( "b1", MakeMatrix< F, 1, 3 >( { 0.1, 0.2, 0.3 } ) ); _.emplace( "W2", MakeMatrix< F, 3, 2 >( { 0.1, 0.4, 0.2, 0.5, 0.3, 0.6 } ) ); _.emplace( "b2", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); _.emplace( "W3", MakeMatrix< F, 2, 2 >( { 0.1, 0.3, 0.2, 0.4 } ) ); _.emplace( "b3", MakeMatrix< F, 1, 2 >( { 0.1, 0.2 } ) ); return _; } // Identify function template < typename F > Matrix< F > identify_function( const Matrix< F >& _ ) { return _; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vMatrix< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R( y, x ); } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vMatrix< F >& R ) { Matrix< F > v( L.h, R.w ); ADD<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } // Forward 
propagation template < typename F > Matrix< F > forward( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { auto W1 = network.at( "W1" ); auto W2 = network.at( "W2" ); auto W3 = network.at( "W3" ); auto b1 = network.at( "b1" ); auto b2 = network.at( "b2" ); auto b3 = network.at( "b3" ); auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = identify_function( a3 ); return y; } // Annotation / fuction test template < typename F > void network_txt() { cout << "Initialize network with weights" << endl; auto network = init_network< F >(); cout << "test network" << endl; auto x = MakeMatrix< F, 1, 2 >( { 1.0, 0.5 } ); auto y = forward( network, x ); cout << y; } // Softmax (primitive) ///////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > __global__ void EXP( vMatrix< F > V, vMatrix< F > P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = exp( P( y, x ) ); } template < typename F > Matrix< F > exp( const vMatrix< F >& P ) { Matrix< F > v( P.h, P.w ); EXP<<< grid2D( v.h, v.w ), thread2D() >>>( v, P ); cudaDeviceSynchronize(); return v; } template < typename F > F sum( const vMatrix< F >& P ) { F _ = 0; for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) _ += P( y, x ); return _; } template < typename F > __global__ void DIV_INP( vMatrix< F > V, F P ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) /= P; } template < typename F > void operator /=( const vMatrix< F >& L, F R ) { DIV_INP<<< grid2D( L.h, L.w ), thread2D() >>>( L, R ); cudaDeviceSynchronize(); } template < typename F > Matrix< F > softmax_primitive( const vMatrix< F >& p ) { auto v = exp( p ); v /= sum( v ); return v; } template < typename F > void softmax_primitive_txt() { cout << "softmax_primitive" << endl; cout << softmax_primitive( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ); } // Softmax///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > F max( const vMatrix< F >& P ) { F _ = P( 0, 0 ); for ( size_t y = 0; y < P.h; y++ ) for ( size_t x = 0; x < P.w; x++ ) if ( P( y, x ) > _ ) _ = P( y, x ); return _; } template < typename F > __global__ void SUB_C( vMatrix< F > V, vMatrix< F > L, F R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) - R; } template < typename F > Matrix< F > operator -( const vMatrix< F >& L, F R ) { Matrix< F > v( L.h, L.w ); SUB_C<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } template < typename F > Matrix< F > softmax( const vMatrix< F >& p ) { auto v = exp( p - max( p ) ); v /= sum( v ); return v; } template < typename F > void softmax_txt() { cout << "softmax" << endl; cout << softmax( MakeMatrix< F, 1, 3 >( { 1010, 1000, 990 } ) ); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template < typename F > void sum_softmax() { cout << "sum( softmax )" << endl; cout << sum( softmax( MakeMatrix< F, 1, 3 >( { 0.3, 2.9, 4.0 } ) ) ) << endl; } 
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #include <fstream> template < typename F > map< string, Matrix< F > > get_data() { map< string, Matrix< F > > v; { ifstream ifs( "../mnist_data/train-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/train-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 60000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_train", w ); } { ifstream ifs( "../mnist_data/train-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/train-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 60000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_train", w ); } { ifstream ifs( "../mnist_data/t10k-images.idx3-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_data/t10k-images.idx3-ubyte"; ifs.ignore( 16 ); Matrix< F > w( 10000, 28 * 28 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ( (unsigned char)ifs.get() ) / 255.0; v.emplace( "x_test", w ); } { ifstream ifs( "../mnist_data/t10k-labels.idx1-ubyte" ); if ( ! ifs.is_open() ) throw "../mnist_datat10k-labels.idx1-ubyte"; ifs.ignore( 8 ); Matrix< F > w( 1, 10000 ); for ( size_t _ = 0; _ < w.h * w.w; _++ ) w._[ _ ] = ifs.get(); v.emplace( "t_test", w ); } return v; } map< string, Matrix< double > > init_network() { map< string, Matrix< double > > v; ifstream ifs( "../mnist_data/sample_weight.bin" ); if ( ! ifs.is_open() ) throw "../mnist_data/sample_weight.bin"; { Matrix< double > w( 784, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W1", w ); } { Matrix< double > w( 50, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W2", w ); } { Matrix< double > w( 100, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "W3", w ); } { Matrix< double > w( 1, 50 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b1", w ); } { Matrix< double > w( 1, 100 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b2", w ); } { Matrix< double > w( 1, 10 ); ifs.read( (char*)w._, w.h * w.w * sizeof( double ) ); v.emplace( "b3", w ); } return v; } template < typename F > __global__ void ADD( vMatrix< F > V, vMatrix< F > L, vArray< F > R ) { auto y = (size_t)blockIdx.y * blockDim.y + threadIdx.y; if ( y >= V.h ) return; auto x = (size_t)blockIdx.x * blockDim.x + threadIdx.x; if ( x >= V.w ) return; V( y, x ) = L( y, x ) + R[ x ]; } template < typename F > Matrix< F > operator +( const vMatrix< F >& L, const vArray< F >& R ) { Matrix< F > v( L.h, L.w ); ADD<<< grid2D( v.h, v.w ), thread2D() >>>( v, L, R ); cudaDeviceSynchronize(); return v; } template < typename F > Matrix< F > predict( map< string, Matrix< F > >& network, const vMatrix< F >& x ) { Matrix< F >& W1 = network.at( "W1" ); Matrix< F >& W2 = network.at( "W2" ); Matrix< F >& W3 = network.at( "W3" ); auto b1 = network.at( "b1" )[ 0 ]; auto b2 = network.at( "b2" )[ 0 ]; auto b3 = network.at( "b3" )[ 0 ]; auto a1 = x * W1 + b1; auto z1 = sigmoid( a1 ); auto a2 = z1 * W2 + b2; auto z2 = sigmoid( a2 ); auto a3 = z2 * W3 + b3; auto y = softmax( a3 ); return y; } template < typename F > F argmax( const vArray< F >& P ) { size_t _ = 0; for ( size_t i = 1; i < P.n; i++ ) if ( P[ i ] > P[ _ ] ) _ = i; return F( _ ); } template < typename F > Array< F > argmax( const vMatrix< F >& P ) { Array< F > _( P.h ); for ( size_t y = 0; y < P.h; y++ ) _[ y ] = argmax( P[ y ] ); return _; } 
template < typename F > vArray< F > Part( const vArray< F >& _, size_t O, size_t N ) { return vArray< F >( _._ + O , N ); } template < typename F > vMatrix< F > Part( const vMatrix< F >& _, size_t Y, size_t X, size_t H, size_t W ) { return vMatrix< F >( _._ + Y * _.v + X , H , W , _.v ); } void mnist() { cout << "MNIST" << endl; auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; auto network = init_network(); auto accuracy_cnt = 0; for ( size_t i = 0; i < x_test.h; i++ ) { auto y = predict( network, Part( x_test, i, 0, 1, x_test.w ) ); auto p = argmax( y[ 0 ] ); if ( p == t_test[ i ] ) accuracy_cnt++; } cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// 3.6.3 template < typename F > size_t CountEquals( const vArray< F >& L, const vArray< F >& R ) { size_t _ = 0; for ( size_t i = 0; i < L.n; i++ ) if ( L[ i ] == R[ i ] ) _++; return _; } void mnist_batch() { cout << "MNIST BATCH" << endl; clock_t start_get = clock(); auto w = get_data< double >(); auto x_test = w.at( "x_test" ); auto t_test = w.at( "t_test" )[ 0 ]; clock_t end_get = clock(); const double time_get = static_cast<double>(end_get - start_get) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (import data) %lf[ms]\n", time_get); clock_t start_net = clock(); auto network = init_network(); clock_t end_net = clock(); const double time_net = static_cast<double>(end_net - start_net) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (network initialization) %lf[ms]\n", time_net); clock_t start_pre = clock(); auto accuracy_cnt = 0; int batch_size = 100; for ( size_t i = 0; i < x_test.h; i += batch_size ) { auto y = predict( network, Part( x_test, i, 0, 100, x_test.w ) ); auto p = argmax( y ); accuracy_cnt += CountEquals( p, Part( t_test, i, 100 ) ); } clock_t end_pre = clock(); const double time_pre = static_cast<double>(end_pre - start_pre) / CLOCKS_PER_SEC * 1000.0; printf("Elapsed time (prediction) %lf[ms]\n", time_pre); cout << "accuracy_cnt: " << ( ( double)accuracy_cnt / (double)x_test.h ) << endl; } //////////////////////////////////////////////////////////////////////////////// Main template < typename F > void Main() { sigmoid_txt< F >(); ReLU_txt< F >(); dot_operator_txt< F >(); network_txt< F >(); softmax_txt< F >(); softmax_primitive_txt< F >(); sum_softmax< F >(); mnist(); clock_t start = clock(); mnist_batch(); clock_t end = clock(); const double time = static_cast<double>(end - start) / CLOCKS_PER_SEC * 1000.0; printf("Total elapsed time %lf[ms]\n", time); } int main( int argc, char* argv[] ) { Main< double >(); }
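A note on the softmax test above: it relies on the shift invariance softmax(x) = softmax(x - c). Subtracting max(p) keeps every exponent at or below zero, so exp() cannot overflow even for inputs like {1010, 1000, 990}, where a naive exp(1010) overflows a double. A small standalone host-side sketch of the same trick (illustrative only, independent of the Matrix/vMatrix classes above):

#include <cmath>
#include <cstdio>

// Stable softmax over a small array: softmax(x) == softmax(x - max(x)).
void softmax_stable(const double* x, double* y, int n) {
  double m = x[0];
  for (int i = 1; i < n; i++) if (x[i] > m) m = x[i];
  double s = 0;
  for (int i = 0; i < n; i++) { y[i] = std::exp(x[i] - m); s += y[i]; }
  for (int i = 0; i < n; i++) y[i] /= s;
}

int main() {
  double x[3] = {1010, 1000, 990}, y[3];
  softmax_stable(x, y, 3);   // roughly {0.99995, 4.5e-05, 2.1e-09}, sums to 1
  printf("%g %g %g\n", y[0], y[1], y[2]);
}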
4dd11c9b956168d895042843a7020e6a70905318.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

__global__ void sum_array_gpu(int *a, int *b, int *c, int size)
{
    int gid = blockDim.x * blockDim.y * gridDim.x * blockIdx.y
            + blockDim.x * blockDim.y * blockIdx.x
            + blockDim.x * threadIdx.y + threadIdx.x;
    if (gid < size) {
        c[gid] = a[gid] + b[gid];
    }
    //printf("gid : %d, a[gid] : %d, b[gid] : %d, c[gid] : %d\n", gid, a[gid], b[gid], c[gid]);
}

void sum_array_cpu(int *a, int *b, int *c, int size)
{
    for (int i = 0; i < size; i++) {
        c[i] = a[i] + b[i];
    }
}

bool checkResult(int *a, int *b, int size)
{
    for (int i = 0; i < size; i++) {
        if (a[i] != b[i]) {
            printf("mismatch at index %d: %d != %d\n", i, a[i], b[i]);
            return false;
        }
        //printf("the current value of a[i] and b[i] are the same\n");
    }
    return true;
}

int main(int argc, char *argv[])
{
    int size = 1000;
    int dim_x = 2;
    int dim_y = 2;
    int block_x = 16;
    int block_y = 16;
    int byte_size = size * sizeof(int);

    int *a_input, *b_input, *c_output, *gpu_output;
    a_input = (int*)malloc(byte_size);
    b_input = (int*)malloc(byte_size);
    c_output = (int*)malloc(byte_size);
    gpu_output = (int*)malloc(byte_size);
    for (int i = 0; i < size; i++) {
        a_input[i] = i;
        b_input[i] = i * 2;
    }

    //cpu array sum calculation
    sum_array_cpu(a_input, b_input, c_output, size);

    int *a_gpu_input, *b_gpu_input, *c_gpu_output;
    hipMalloc((void**)&a_gpu_input, byte_size);
    hipMalloc((void**)&b_gpu_input, byte_size);
    hipMalloc((void**)&c_gpu_output, byte_size);
    hipMemcpy(a_gpu_input, a_input, byte_size, hipMemcpyHostToDevice);
    hipMemcpy(b_gpu_input, b_input, byte_size, hipMemcpyHostToDevice);

    dim3 block(block_x, block_y);
    dim3 grid(dim_x, dim_y);
    printf("dimension of each block is: %d, %d\n", block.x, block.y);
    printf("dimension of grid is: %d, %d\n", grid.x, grid.y);
    hipLaunchKernelGGL(( sum_array_gpu), dim3(grid), dim3(block), 0, 0, a_gpu_input, b_gpu_input, c_gpu_output, size);
    hipDeviceSynchronize();

    //memory transfer back to host
    hipMemcpy(gpu_output, c_gpu_output, byte_size, hipMemcpyDeviceToHost);

    bool test = checkResult(c_output, gpu_output, size);
    if (test == true) {
        printf("the result is true\n");
    } else {
        printf("the result is false\n");
    }

    hipFree(a_gpu_input);
    hipFree(b_gpu_input);
    hipFree(c_gpu_output);
    free(a_input);
    free(b_input);
    free(c_output);
    free(gpu_output);
    hipDeviceReset();
    return 0;
}
4dd11c9b956168d895042843a7020e6a70905318.cu
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

__global__ void sum_array_gpu(int *a, int *b, int *c, int size)
{
    int gid = blockDim.x * blockDim.y * gridDim.x * blockIdx.y
            + blockDim.x * blockDim.y * blockIdx.x
            + blockDim.x * threadIdx.y + threadIdx.x;
    if (gid < size) {
        c[gid] = a[gid] + b[gid];
    }
    //printf("gid : %d, a[gid] : %d, b[gid] : %d, c[gid] : %d\n", gid, a[gid], b[gid], c[gid]);
}

void sum_array_cpu(int *a, int *b, int *c, int size)
{
    for (int i = 0; i < size; i++) {
        c[i] = a[i] + b[i];
    }
}

bool checkResult(int *a, int *b, int size)
{
    for (int i = 0; i < size; i++) {
        if (a[i] != b[i]) {
            printf("mismatch at index %d: %d != %d\n", i, a[i], b[i]);
            return false;
        }
        //printf("the current value of a[i] and b[i] are the same\n");
    }
    return true;
}

int main(int argc, char *argv[])
{
    int size = 1000;
    int dim_x = 2;
    int dim_y = 2;
    int block_x = 16;
    int block_y = 16;
    int byte_size = size * sizeof(int);

    int *a_input, *b_input, *c_output, *gpu_output;
    a_input = (int*)malloc(byte_size);
    b_input = (int*)malloc(byte_size);
    c_output = (int*)malloc(byte_size);
    gpu_output = (int*)malloc(byte_size);
    for (int i = 0; i < size; i++) {
        a_input[i] = i;
        b_input[i] = i * 2;
    }

    //cpu array sum calculation
    sum_array_cpu(a_input, b_input, c_output, size);

    int *a_gpu_input, *b_gpu_input, *c_gpu_output;
    cudaMalloc((void**)&a_gpu_input, byte_size);
    cudaMalloc((void**)&b_gpu_input, byte_size);
    cudaMalloc((void**)&c_gpu_output, byte_size);
    cudaMemcpy(a_gpu_input, a_input, byte_size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu_input, b_input, byte_size, cudaMemcpyHostToDevice);

    dim3 block(block_x, block_y);
    dim3 grid(dim_x, dim_y);
    printf("dimension of each block is: %d, %d\n", block.x, block.y);
    printf("dimension of grid is: %d, %d\n", grid.x, grid.y);
    sum_array_gpu<<<grid, block>>>(a_gpu_input, b_gpu_input, c_gpu_output, size);
    cudaDeviceSynchronize();

    //memory transfer back to host
    cudaMemcpy(gpu_output, c_gpu_output, byte_size, cudaMemcpyDeviceToHost);

    bool test = checkResult(c_output, gpu_output, size);
    if (test == true) {
        printf("the result is true\n");
    } else {
        printf("the result is false\n");
    }

    cudaFree(a_gpu_input);
    cudaFree(b_gpu_input);
    cudaFree(c_gpu_output);
    free(a_input);
    free(b_input);
    free(c_output);
    free(gpu_output);
    cudaDeviceReset();
    return 0;
}
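For reference, the gid expression in sum_array_gpu above linearizes a 2D grid of 2D blocks: with block = (16,16) and grid = (2,2) there are 2*2*16*16 = 1024 threads for size = 1000 elements, and the gid < size guard masks the 24 surplus threads. The same index written with named factors (a hypothetical helper, not part of the file):

// Global 1D id for a (gridDim.x x gridDim.y) grid of (blockDim.x x blockDim.y) blocks:
//   threads_per_block = blockDim.x * blockDim.y
//   block_id          = blockIdx.y * gridDim.x + blockIdx.x    (row-major over blocks)
//   thread_in_block   = threadIdx.y * blockDim.x + threadIdx.x
//   gid               = block_id * threads_per_block + thread_in_block
__device__ int global_id_2d() {
  int threads_per_block = blockDim.x * blockDim.y;
  int block_id = blockIdx.y * gridDim.x + blockIdx.x;
  int thread_in_block = threadIdx.y * blockDim.x + threadIdx.x;
  return block_id * threads_per_block + thread_in_block;
}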
5eccf6b930ee330c6741ef62ad854edc322ee4a2.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/util/math_functions.hpp"
#include "caffe/layers/bernoulli_sample_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void BernoulliSampleForward(const int nthreads, const Dtype* p,
    const int D, Dtype* sample) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    if (index >= nthreads) return;
    int item = index / D;
    int idim = index % D;
    if (sample[item * D + idim] > p[item * D + idim]) {
      sample[item * D + idim] = 0;
    } else {
      sample[item * D + idim] = 1;
    }
  }
}

template <typename Dtype>
void BernoulliSampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* p = bottom[0]->gpu_data();
  Dtype* sample = top[0]->mutable_gpu_data();
  int N = bottom[0]->shape(0);
  int D = bottom[0]->count(1);
  caffe_gpu_rng_uniform<Dtype>(bottom[0]->count(), 0., 1., sample);
  hipLaunchKernelGGL(( BernoulliSampleForward<Dtype>), dim3(CAFFE_GET_BLOCKS(bottom[0]->count())),
      dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      bottom[0]->count(), p, D, sample);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
void BernoulliSampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}

INSTANTIATE_LAYER_GPU_FUNCS(BernoulliSampleLayer);

}  // namespace caffe
5eccf6b930ee330c6741ef62ad854edc322ee4a2.cu
#include <algorithm>
#include <cfloat>
#include <vector>

#include "caffe/util/math_functions.hpp"
#include "caffe/layers/bernoulli_sample_layer.hpp"

namespace caffe {

template <typename Dtype>
__global__ void BernoulliSampleForward(const int nthreads, const Dtype* p,
    const int D, Dtype* sample) {
  CUDA_KERNEL_LOOP(index, nthreads) {
    if (index >= nthreads) return;
    int item = index / D;
    int idim = index % D;
    if (sample[item * D + idim] > p[item * D + idim]) {
      sample[item * D + idim] = 0;
    } else {
      sample[item * D + idim] = 1;
    }
  }
}

template <typename Dtype>
void BernoulliSampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* p = bottom[0]->gpu_data();
  Dtype* sample = top[0]->mutable_gpu_data();
  int N = bottom[0]->shape(0);
  int D = bottom[0]->count(1);
  caffe_gpu_rng_uniform<Dtype>(bottom[0]->count(), 0., 1., sample);
  BernoulliSampleForward<Dtype><<<CAFFE_GET_BLOCKS(bottom[0]->count()),
      CAFFE_CUDA_NUM_THREADS>>>(
      bottom[0]->count(), p, D, sample);
  CUDA_POST_KERNEL_CHECK;
}

template <typename Dtype>
void BernoulliSampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
}

INSTANTIATE_LAYER_GPU_FUNCS(BernoulliSampleLayer);

}  // namespace caffe
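The layer above draws Bernoulli samples by thresholding: the output buffer is first filled with uniform(0,1) noise via caffe_gpu_rng_uniform, then each element is set to 1 when the noise is <= p and to 0 otherwise, so P(sample = 1) = p. A library-free sketch of that thresholding step, written as a plain grid-stride kernel on the assumption that Caffe's CUDA_KERNEL_LOOP expands to essentially this loop:

// Threshold pre-generated uniforms u[i] in [0,1) against probabilities p[i],
// writing Bernoulli(p[i]) samples in place. The grid-stride loop lets any
// launch configuration cover all n elements.
__global__ void bernoulli_from_uniform(float* u, const float* p, int n) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    u[i] = (u[i] <= p[i]) ? 1.0f : 0.0f;
  }
}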
d66d336dae5aa0c95043ec950e887257c204b6f2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ /* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> // headers in local files #include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h" #include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h" namespace apollo { namespace perception { namespace lidar { // modified prefix sum code from // https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf __global__ void scan_x(int* g_odata, int* g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int offset = 1; temp[2 * thid] = g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1]; for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid * bdim * 2 + 2 * thid] = temp[2 * thid + 1]; // write results to device memory int second_ind = 2 * thid + 2; if (second_ind == bdim * 2) { g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1]; } else { g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2]; } } // modified prefix sum code from // https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf __global__ void scan_y(int* g_odata, int* g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int gdim = gridDim.x; int offset = 1; temp[2 * thid] = g_idata[bid + 2 * thid * gdim]; // load input into shared memory temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim]; for (int d = n >> 1; d > 0; d >>= 1) { // 
build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid + 2 * thid * gdim] = temp[2 * thid + 1]; // write results to device memory int second_ind = 2 * thid + 2; if (second_ind == bdim * 2) { g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim]; } else { g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2]; } } __global__ void make_anchor_mask_kernel( const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y, int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int grid_x_size, const int grid_y_size, const int num_inds_for_scan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0}; const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1 const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1 anchor_coor[0] = floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size); anchor_coor[1] = floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size); anchor_coor[2] = floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size); anchor_coor[3] = floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size); anchor_coor[0] = max(anchor_coor[0], 0); anchor_coor[1] = max(anchor_coor[1], 0); anchor_coor[2] = min(anchor_coor[2], grid_x_size_1); anchor_coor[3] = min(anchor_coor[3], grid_y_size_1); int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan + anchor_coor[2]]; int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan + anchor_coor[0]]; int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan + anchor_coor[0]]; int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan + anchor_coor[2]]; int area = right_top - left_top - right_bottom + left_bottom; if (area > 1) { dev_anchor_mask[tid] = 1; } else { dev_anchor_mask[tid] = 0; } } AnchorMaskCuda::AnchorMaskCuda(const int num_threads, const int num_inds_for_scan, const int num_anchor, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int grid_x_size, const int grid_y_size) : num_threads_(num_threads), num_inds_for_scan_(num_inds_for_scan), num_anchor_(num_anchor), min_x_range_(min_x_range), min_y_range_(min_y_range), pillar_x_size_(pillar_x_size), pillar_y_size_(pillar_y_size), grid_x_size_(grid_x_size), grid_y_size_(grid_y_size) {} // TODO(chenjiahao): set anchor ranges for each class void AnchorMaskCuda::DoAnchorMaskCuda( int* dev_sparse_pillar_map, int* dev_cumsum_along_x, int* dev_cumsum_along_y, const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y, int* dev_anchor_mask) { hipLaunchKernelGGL(( scan_x), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2), num_inds_for_scan_ * sizeof(int), 0, dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_); 
hipLaunchKernelGGL(( scan_y), dim3(num_inds_for_scan_), dim3(num_inds_for_scan_ / 2), num_inds_for_scan_ * sizeof(int), 0, dev_cumsum_along_y, dev_cumsum_along_x, num_inds_for_scan_); GPU_CHECK(hipMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y, num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int), hipMemcpyDeviceToDevice)); int num_blocks = DIVUP(num_anchor_, num_threads_); hipLaunchKernelGGL(( make_anchor_mask_kernel), dim3(num_blocks), dim3(num_threads_), 0, 0, dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x, dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask, min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_, grid_y_size_, num_inds_for_scan_); } } // namespace lidar } // namespace perception } // namespace apollo
d66d336dae5aa0c95043ec950e887257c204b6f2.cu
/****************************************************************************** * Copyright 2020 The Apollo Authors. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *****************************************************************************/ /* * Copyright 2018-2019 Autoware Foundation. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <algorithm> // headers in local files #include "modules/perception/lidar/lib/detection/lidar_point_pillars/anchor_mask_cuda.h" #include "modules/perception/lidar/lib/detection/lidar_point_pillars/common.h" namespace apollo { namespace perception { namespace lidar { // modified prefix sum code from // https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf __global__ void scan_x(int* g_odata, int* g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int offset = 1; temp[2 * thid] = g_idata[bid * bdim * 2 + 2 * thid]; // load input into shared memory temp[2 * thid + 1] = g_idata[bid * bdim * 2 + 2 * thid + 1]; for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid * bdim * 2 + 2 * thid] = temp[2 * thid + 1]; // write results to device memory int second_ind = 2 * thid + 2; if (second_ind == bdim * 2) { g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 1] + g_idata[bid * bdim * 2 + 2 * thid + 1]; } else { g_odata[bid * bdim * 2 + 2 * thid + 1] = temp[2 * thid + 2]; } } // modified prefix sum code from // https://www.mimuw.edu.pl/~ps209291/kgkp/slides/scan.pdf __global__ void scan_y(int* g_odata, int* g_idata, int n) { extern __shared__ int temp[]; // allocated on invocation int thid = threadIdx.x; int bid = blockIdx.x; int bdim = blockDim.x; int gdim = gridDim.x; int offset = 1; temp[2 * thid] = g_idata[bid + 2 * thid * gdim]; // load input into shared memory temp[2 * thid + 1] = g_idata[bid + 2 * thid * gdim + gdim]; for (int d = n >> 1; d > 0; d >>= 1) { // build sum in place up the tree __syncthreads(); if (thid < d) { int ai = offset * (2 * 
thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; temp[bi] += temp[ai]; } offset *= 2; } if (thid == 0) { temp[n - 1] = 0; } // clear the last element for (int d = 1; d < n; d *= 2) { // traverse down tree & build scan offset >>= 1; __syncthreads(); if (thid < d) { int ai = offset * (2 * thid + 1) - 1; int bi = offset * (2 * thid + 2) - 1; int t = temp[ai]; temp[ai] = temp[bi]; temp[bi] += t; } } __syncthreads(); g_odata[bid + 2 * thid * gdim] = temp[2 * thid + 1]; // write results to device memory int second_ind = 2 * thid + 2; if (second_ind == bdim * 2) { g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 1] + g_idata[bid + 2 * thid * gdim + gdim]; } else { g_odata[bid + 2 * thid * gdim + gdim] = temp[2 * thid + 2]; } } __global__ void make_anchor_mask_kernel( const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y, int* dev_sparse_pillar_map, int* dev_anchor_mask, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int grid_x_size, const int grid_y_size, const int num_inds_for_scan) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int anchor_coor[NUM_2D_BOX_CORNERS_MACRO] = {0}; const int grid_x_size_1 = grid_x_size - 1; // grid_x_size - 1 const int grid_y_size_1 = grid_y_size - 1; // grid_y_size - 1 anchor_coor[0] = floor((dev_box_anchors_min_x[tid] - min_x_range) / pillar_x_size); anchor_coor[1] = floor((dev_box_anchors_min_y[tid] - min_y_range) / pillar_y_size); anchor_coor[2] = floor((dev_box_anchors_max_x[tid] - min_x_range) / pillar_x_size); anchor_coor[3] = floor((dev_box_anchors_max_y[tid] - min_y_range) / pillar_y_size); anchor_coor[0] = max(anchor_coor[0], 0); anchor_coor[1] = max(anchor_coor[1], 0); anchor_coor[2] = min(anchor_coor[2], grid_x_size_1); anchor_coor[3] = min(anchor_coor[3], grid_y_size_1); int right_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan + anchor_coor[2]]; int left_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan + anchor_coor[0]]; int left_top = dev_sparse_pillar_map[anchor_coor[3] * num_inds_for_scan + anchor_coor[0]]; int right_bottom = dev_sparse_pillar_map[anchor_coor[1] * num_inds_for_scan + anchor_coor[2]]; int area = right_top - left_top - right_bottom + left_bottom; if (area > 1) { dev_anchor_mask[tid] = 1; } else { dev_anchor_mask[tid] = 0; } } AnchorMaskCuda::AnchorMaskCuda(const int num_threads, const int num_inds_for_scan, const int num_anchor, const float min_x_range, const float min_y_range, const float pillar_x_size, const float pillar_y_size, const int grid_x_size, const int grid_y_size) : num_threads_(num_threads), num_inds_for_scan_(num_inds_for_scan), num_anchor_(num_anchor), min_x_range_(min_x_range), min_y_range_(min_y_range), pillar_x_size_(pillar_x_size), pillar_y_size_(pillar_y_size), grid_x_size_(grid_x_size), grid_y_size_(grid_y_size) {} // TODO(chenjiahao): set anchor ranges for each class void AnchorMaskCuda::DoAnchorMaskCuda( int* dev_sparse_pillar_map, int* dev_cumsum_along_x, int* dev_cumsum_along_y, const float* dev_box_anchors_min_x, const float* dev_box_anchors_min_y, const float* dev_box_anchors_max_x, const float* dev_box_anchors_max_y, int* dev_anchor_mask) { scan_x<<<num_inds_for_scan_, num_inds_for_scan_ / 2, num_inds_for_scan_ * sizeof(int)>>>( dev_cumsum_along_x, dev_sparse_pillar_map, num_inds_for_scan_); scan_y<<<num_inds_for_scan_, num_inds_for_scan_ / 2, num_inds_for_scan_ * sizeof(int)>>>( dev_cumsum_along_y, 
dev_cumsum_along_x, num_inds_for_scan_); GPU_CHECK(cudaMemcpy(dev_sparse_pillar_map, dev_cumsum_along_y, num_inds_for_scan_ * num_inds_for_scan_ * sizeof(int), cudaMemcpyDeviceToDevice)); int num_blocks = DIVUP(num_anchor_, num_threads_); make_anchor_mask_kernel<<<num_blocks, num_threads_>>>( dev_box_anchors_min_x, dev_box_anchors_min_y, dev_box_anchors_max_x, dev_box_anchors_max_y, dev_sparse_pillar_map, dev_anchor_mask, min_x_range_, min_y_range_, pillar_x_size_, pillar_y_size_, grid_x_size_, grid_y_size_, num_inds_for_scan_); } } // namespace lidar } // namespace perception } // namespace apollo
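The two scan kernels above turn the sparse pillar occupancy map into a 2D inclusive prefix sum (a summed-area table): scan_x accumulates along rows, scan_y then accumulates the row results along columns. make_anchor_mask_kernel can then count the occupied pillars under each anchor's footprint with four lookups (right_top - left_top - right_bottom + left_bottom) and keep only anchors whose footprint overlaps occupied space. A CPU sketch of the same idea, with illustrative names that are not part of Apollo's API:

#include <vector>

// Inclusive 2D prefix sum over an n x n occupancy grid (row-major),
// computed the same way as scan_x followed by scan_y.
std::vector<int> IntegralImage(const std::vector<int>& grid, int n) {
  std::vector<int> cumsum(grid);
  for (int y = 0; y < n; ++y)        // accumulate along x (within each row)
    for (int x = 1; x < n; ++x)
      cumsum[y * n + x] += cumsum[y * n + x - 1];
  for (int x = 0; x < n; ++x)        // accumulate along y (within each column)
    for (int y = 1; y < n; ++y)
      cumsum[y * n + x] += cumsum[(y - 1) * n + x];
  return cumsum;
}

// Occupancy count inside the rectangle spanned by (x0, y0)..(x1, y1), using
// the same four corner reads as make_anchor_mask_kernel.
int AreaCount(const std::vector<int>& cumsum, int n,
              int x0, int y0, int x1, int y1) {
  int right_top    = cumsum[y1 * n + x1];
  int left_top     = cumsum[y1 * n + x0];
  int right_bottom = cumsum[y0 * n + x1];
  int left_bottom  = cumsum[y0 * n + x0];
  return right_top - left_top - right_bottom + left_bottom;
}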
df6643aad6026f95bf48fa5bcdeb42c56a4e2d4c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016, David lu All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #define BIN_SIZE 32 using namespace std; #define CHECK(res) if(res!=hipSuccess){exit(-1);} #define BLOCKNUM 1024 #define THREADNUM 64 __global__ void _k_copy_padding_data_blob_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } else data_output[j] = 0.0; } else data_output[j] = 0.0; } } extern "C" void copy_padding_data_blob_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t *&out_data) { hipLaunchKernelGGL(( _k_copy_padding_data_blob_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, input_dim, channel, pad); hipDeviceSynchronize(); } __global__ void _k_append_padding_data_blob_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad; int output_length = output_dim * output_dim * channel; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = 
data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row < output_dim - pad) { if (col < output_dim - pad) { in_start = ((row) * input_dim + col) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } else data_output[data_row][out_start] = 0.0; } else data_output[data_row][out_start] = 0.0; } } extern "C" void append_padding_data_blob_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { hipLaunchKernelGGL(( _k_append_padding_data_blob_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, input_dim, channel, pad); hipDeviceSynchronize(); } __global__ void _k_copy_unpadding_data_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int length = input_dim * input_dim * channel; int output_dim = input_dim + 2 * pad; int indata_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; data_col = j % length; row = data_col / (input_dim * channel); //col = (data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; data_output[j] = 0.0; in_start = ((row + pad) * output_dim + (col + pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } } extern "C" void copy_unpadding_data_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t *&out_data) { hipLaunchKernelGGL(( _k_copy_unpadding_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, input_dim, channel, pad); hipDeviceSynchronize(); } __global__ void _k_append_unpadding_data_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_length = input_dim * input_dim * channel; int output_dim = input_dim + pad; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = data_col / (input_dim * channel); //col =(data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; in_start = ((row) * output_dim + (col)) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } } extern "C" void append_unpadding_data_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { hipLaunchKernelGGL(( _k_append_unpadding_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, input_dim, channel, pad); hipDeviceSynchronize(); } __global__ void _k_copy_padding_data_sign_gpu(unsigned int *data_input, unsigned int *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int input_length = input_dim * input_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % 
(output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * input_length + in_start]; } else data_output[j] = 0; } else data_output[j] = 0; } } extern "C" void copy_padding_data_sign_gpu(unsigned int *&data, int num, int input_dim, int channel, int pad, unsigned int *&out_data) { hipLaunchKernelGGL(( _k_copy_padding_data_sign_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, input_dim, channel, pad); hipDeviceSynchronize(); } __global__ void _k_img2col_gpu(float_t *data_input, float_t *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int border = input_dim - output_dim; int out_start, in_start, in; int data_row, data_col; int k_row, k_col, c; int indata_length = input_dim * input_dim * channel; int outdata_length = output_length * block_size; for (int j = bid; j < num * output_length; j += BLOCKNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * (block_size); in_start = (data_col + (data_col / output_dim) * border) * channel; for (int i = tid; i < block_size; i += THREADNUM) { k_row = (i % (kernel_size * kernel_size)) / kernel_size; k_col = i % kernel_size; c = i / (kernel_size * kernel_size); in = in_start + (k_row * input_dim + k_col) * channel + c; data_output[data_row * outdata_length + out_start + i] = data_input[data_row * indata_length + in]; } __syncthreads(); // for (int c = 0; c < channel; c++) { // for (int ki = 0; ki < kernel_size; ki++) { // for (int kj = 0; kj < kernel_size; kj++) { // in = in_start + (ki * input_dim + kj) * channel + c; // out = out_start + c * block_size + ki * kernel_size + kj; // data_output[data_row * outdata_length + out] = // data_input[data_row * indata_length + in]; // } // } // } } } extern "C" void img2col_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int block_size = kernel_size * kernel_size * channel; int output_length = output_dim * output_dim; hipLaunchKernelGGL(( _k_img2col_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, pad_input, num, block_size, output_length, channel, input_dim, output_dim, stride, kernel_size); hipDeviceSynchronize(); } __global__ void _k_col2img_gpu(float_t *data, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int length, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int data_row, data_col; int k_index, outset_index; int block_size = kernel_size * kernel_size * channel; int indata_length = output_dim * output_dim * block_size; int c; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[i] = 0.0; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = 
output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; k_index = ((startset_i - outset_i * stride) * kernel_size + (startset_j - outset_j * stride)) + c * kernel_size * kernel_size; outset_index = (outset_i * output_dim + outset_j) * block_size; out_data[i] += data[data_row * indata_length + outset_index + k_index]; } } } extern "C" void col2img_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int length = input_dim * input_dim * channel; hipLaunchKernelGGL(( _k_col2img_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, num, channel, input_dim, output_dim, stride, kernel_size, length, pad_input); hipDeviceSynchronize(); } __global__ void _k_img2bitcol_gpu(unsigned int *data_input, unsigned int *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int length, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int border = input_dim - output_dim; int sp[BIN_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int R[BIN_SIZE] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648 }; int end_flag = kernel_size * kernel_size * channel - 1; int count = 0, index, out_start, in_start, in; int data_row, data_col; unsigned int data = 0; int outdata_length = output_length * block_size; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * block_size; in_start = (data_col + (data_col / output_dim) * border) * channel; count = 0; for (int c = 0; c < channel; c++) { for (int ki = 0; ki < kernel_size; ki++) { for (int kj = 0; kj < kernel_size; kj++) { in = in_start + (ki * input_dim + kj) * channel + c; index = count % BIN_SIZE; sp[index] = data_input[data_row * indata_length + in]; if (index == BIN_SIZE - 1 || count == end_flag) { for (int i = 0; i < BIN_SIZE; i++) { data += R[i] * sp[i]; } data_output[data_row * outdata_length + out_start] = data; data = 0; out_start += 1; for (int m = 0; m < BIN_SIZE; m++) sp[m] = 0; } count++; } } } } } extern "C" void img2bitcol_gpu(unsigned int *&bin_data, int num, int channel, int input_dim, int kernel_size, int stride, int pad, int output_dim, unsigned int *&pad_input) { clock_t start = clock(); int length; if (channel * kernel_size * kernel_size % BIN_SIZE == 0) length = (channel * kernel_size * kernel_size / BIN_SIZE) * output_dim * output_dim; else length = (channel * kernel_size * kernel_size / BIN_SIZE + 1) * output_dim * output_dim; int block_size = length / (output_dim * output_dim); int output_length = output_dim * output_dim; int input_dim_ = input_dim + 2 * pad; hipLaunchKernelGGL(( _k_img2bitcol_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, bin_data, pad_input, num, block_size, output_length, 
channel, input_dim_, output_dim, stride,length, kernel_size); hipDeviceSynchronize(); } __global__ void _k_copy_data_gpu(float_t **data_input, float_t **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_gpu(float_t **&data, float_t **&out_data, int num, int length, int add) { hipLaunchKernelGGL(( _k_copy_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, length, add); hipDeviceSynchronize(); } __global__ void _k_copy_data_bin_gpu(unsigned int **data_input, unsigned int **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_bin_gpu(unsigned int **&data, unsigned int **&out_data, int num, int length, int add) { hipLaunchKernelGGL(( _k_copy_data_bin_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, length, add); hipDeviceSynchronize(); } __global__ void _k_copy2dest_gpu(float_t **data_input, float_t **index_data, float_t **data_output, int num, int input_dim, int output_dim, int channel, int kernel_size, int stride, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; //the index for the data in kernel int offset_i, offset_j; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; offset_i = startset_i - outset_i * stride; offset_j = startset_j - outset_j * stride; if (index_data[data_row][(outset_i * output_dim + outset_j) * channel + c] == (float_t) (offset_i * kernel_size + offset_j)) { data_output[data_row][data_col] += data_input[data_row][(outset_i * output_dim + outset_j) * channel + c]; } } } } extern "C" void copy2dest_gpu(float_t **&data, float_t **&index_data, int num, int output_dim, int input_dim, int channel, int kernel_size, int stride, 
float_t **&out_data) { int length = input_dim * input_dim * channel; hipLaunchKernelGGL(( _k_copy2dest_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, index_data, out_data, num, input_dim, output_dim, channel, kernel_size, stride, length); hipDeviceSynchronize(); } __global__ void _k_copy2mean_gpu(float_t **data_input, float_t **data_output, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int pad, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int pw, ph; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; pw = kernel_size; ph = kernel_size; if (outset_i == output_dim - 1) ph = kernel_size - pad; if (outset_j == output_dim - 1) pw = kernel_size - pad; data_output[data_row][data_col] += (data_input[data_row][(outset_i * output_dim + outset_j) * channel + c] / (float_t) (ph * pw)); } } } extern "C" void copy2mean_gpu(float_t **&data, int num, int output_dim, int input_dim, int channel, int kernel_size, int stride, int pad, float_t **&out_data) { int length = input_dim * input_dim * channel; hipLaunchKernelGGL(( _k_copy2mean_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, out_data, num, channel, input_dim, output_dim, stride, kernel_size, pad, length); hipDeviceSynchronize(); } __global__ void _k_reset_data_gpu(float_t *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_data_gpu(float_t *&data, int num, int length) { hipLaunchKernelGGL(( _k_reset_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, num, length); hipDeviceSynchronize(); } __global__ void _k_reset_bin_data_gpu(unsigned int *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_bin_data_gpu(unsigned int *&data, int num, int length) { hipLaunchKernelGGL(( _k_reset_bin_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, num, length); hipDeviceSynchronize(); } __global__ void _k_set_data_gpu(float_t **data_input, int num, int length, float_t value) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; 
data_input[data_row][out_start] = value; } } extern "C" void set_data_gpu(float_t **&data, int num, int length, float_t value) { hipLaunchKernelGGL(( _k_set_data_gpu), dim3(BLOCKNUM), dim3(THREADNUM), 0, 0, data, num, length, value); hipDeviceSynchronize(); }
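The file above is the hipified form of the CUDA source that follows. The translation is mechanical: cuda* runtime calls become hip* calls, cudaThreadSynchronize() (deprecated in CUDA) is mapped to hipDeviceSynchronize(), and every kernel<<<grid, block, shmem>>>(args) launch becomes hipLaunchKernelGGL(kernel, grid, block, shmem, stream, args). A minimal sketch of that mapping with a placeholder kernel, not one of the kernels in this file:

#include "hip/hip_runtime.h"

__global__ void demo_fill(float *data, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n)
		data[i] = 1.0f;
}

void launch_demo(float *data, int n) {
	// CUDA form:  demo_fill<<<dim3((n + 63) / 64), dim3(64), 0, 0>>>(data, n);
	// hipified:   kernel, grid, block, shared-mem bytes, stream, then the kernel arguments.
	hipLaunchKernelGGL(demo_fill, dim3((n + 63) / 64), dim3(64), 0, 0, data, n);
	hipDeviceSynchronize();
}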
df6643aad6026f95bf48fa5bcdeb42c56a4e2d4c.cu
/* Copyright (c) 2016, David lu All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <stdlib.h> #define BIN_SIZE 32 using namespace std; #define CHECK(res) if(res!=cudaSuccess){exit(-1);} #define BLOCKNUM 1024 #define THREADNUM 64 __global__ void _k_copy_padding_data_blob_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } else data_output[j] = 0.0; } else data_output[j] = 0.0; } } extern "C" void copy_padding_data_blob_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t *&out_data) { _k_copy_padding_data_blob_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_append_padding_data_blob_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad; int output_length = output_dim * output_dim * channel; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % 
output_dim; if (row < output_dim - pad) { if (col < output_dim - pad) { in_start = ((row) * input_dim + col) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } else data_output[data_row][out_start] = 0.0; } else data_output[data_row][out_start] = 0.0; } } extern "C" void append_padding_data_blob_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { _k_append_padding_data_blob_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_copy_unpadding_data_gpu(float_t *data_input, float_t *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int length = input_dim * input_dim * channel; int output_dim = input_dim + 2 * pad; int indata_length = output_dim * output_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; data_col = j % length; row = data_col / (input_dim * channel); //col = (data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; data_output[j] = 0.0; in_start = ((row + pad) * output_dim + (col + pad)) * channel + data_col % channel; data_output[j] = data_input[data_row * indata_length + in_start]; } } extern "C" void copy_unpadding_data_gpu(float_t *&data, int num, int input_dim, int channel, int pad, float_t *&out_data) { _k_copy_unpadding_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_append_unpadding_data_gpu(float_t **data_input, float_t **data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_length = input_dim * input_dim * channel; int output_dim = input_dim + pad; int out_start, in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col; row = data_col / (input_dim * channel); //col =(data_col % (input_dim * channel)) / channel; col = (data_col / channel) % input_dim; in_start = ((row) * output_dim + (col)) * channel + data_col % channel; data_output[data_row][out_start] = data_input[data_row][in_start]; } } extern "C" void append_unpadding_data_gpu(float_t **&data, int num, int input_dim, int channel, int pad, float_t **&out_data) { _k_append_unpadding_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_copy_padding_data_sign_gpu(unsigned int *data_input, unsigned int *data_output, int num, int input_dim, int channel, int pad) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int output_dim = input_dim + pad * 2; int output_length = output_dim * output_dim * channel; int input_length = input_dim * input_dim * channel; int in_start, row, col; int data_row, data_col; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; row = data_col / (output_dim * channel); //col = (data_col % (output_dim * channel)) / channel; col = (data_col / channel) % output_dim; if (row >= pad && row < output_dim - pad) { if (col >= pad && col < output_dim - pad) { in_start = ((row - pad) * input_dim + (col - pad)) * 
channel + data_col % channel; data_output[j] = data_input[data_row * input_length + in_start]; } else data_output[j] = 0; } else data_output[j] = 0; } } extern "C" void copy_padding_data_sign_gpu(unsigned int *&data, int num, int input_dim, int channel, int pad, unsigned int *&out_data) { _k_copy_padding_data_sign_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, input_dim, channel, pad); cudaThreadSynchronize(); } __global__ void _k_img2col_gpu(float_t *data_input, float_t *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int border = input_dim - output_dim; int out_start, in_start, in; int data_row, data_col; int k_row, k_col, c; int indata_length = input_dim * input_dim * channel; int outdata_length = output_length * block_size; for (int j = bid; j < num * output_length; j += BLOCKNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * (block_size); in_start = (data_col + (data_col / output_dim) * border) * channel; for (int i = tid; i < block_size; i += THREADNUM) { k_row = (i % (kernel_size * kernel_size)) / kernel_size; k_col = i % kernel_size; c = i / (kernel_size * kernel_size); in = in_start + (k_row * input_dim + k_col) * channel + c; data_output[data_row * outdata_length + out_start + i] = data_input[data_row * indata_length + in]; } __syncthreads(); // for (int c = 0; c < channel; c++) { // for (int ki = 0; ki < kernel_size; ki++) { // for (int kj = 0; kj < kernel_size; kj++) { // in = in_start + (ki * input_dim + kj) * channel + c; // out = out_start + c * block_size + ki * kernel_size + kj; // data_output[data_row * outdata_length + out] = // data_input[data_row * indata_length + in]; // } // } // } } } extern "C" void img2col_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int block_size = kernel_size * kernel_size * channel; int output_length = output_dim * output_dim; _k_img2col_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, pad_input, num, block_size, output_length, channel, input_dim, output_dim, stride, kernel_size); cudaThreadSynchronize(); } __global__ void _k_col2img_gpu(float_t *data, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int length, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int data_row, data_col; int k_index, outset_index; int block_size = kernel_size * kernel_size * channel; int indata_length = output_dim * output_dim * block_size; int c; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[i] = 0.0; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { 
count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; k_index = ((startset_i - outset_i * stride) * kernel_size + (startset_j - outset_j * stride)) + c * kernel_size * kernel_size; outset_index = (outset_i * output_dim + outset_j) * block_size; out_data[i] += data[data_row * indata_length + outset_index + k_index]; } } } extern "C" void col2img_gpu(float_t *&data, int num, int channel, int input_dim, int kernel_size, int stride, int output_dim, float_t *&pad_input) { int length = input_dim * input_dim * channel; _k_col2img_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, channel, input_dim, output_dim, stride, kernel_size, length, pad_input); cudaThreadSynchronize(); } __global__ void _k_img2bitcol_gpu(unsigned int *data_input, unsigned int *data_output, int num, int block_size, int output_length, int channel, int input_dim, int output_dim, int stride, int length, int kernel_size) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int border = input_dim - output_dim; int sp[BIN_SIZE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; unsigned int R[BIN_SIZE] = { 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536, 131072, 262144, 524288, 1048576, 2097152, 4194304, 8388608, 16777216, 33554432, 67108864, 134217728, 268435456, 536870912, 1073741824, 2147483648 }; int end_flag = kernel_size * kernel_size * channel - 1; int count = 0, index, out_start, in_start, in; int data_row, data_col; unsigned int data = 0; int outdata_length = output_length * block_size; int indata_length = input_dim * input_dim * channel; for (int j = threadid; j < num * output_length; j += BLOCKNUM * THREADNUM) { data_row = j / output_length; data_col = j % output_length; out_start = data_col * block_size; in_start = (data_col + (data_col / output_dim) * border) * channel; count = 0; for (int c = 0; c < channel; c++) { for (int ki = 0; ki < kernel_size; ki++) { for (int kj = 0; kj < kernel_size; kj++) { in = in_start + (ki * input_dim + kj) * channel + c; index = count % BIN_SIZE; sp[index] = data_input[data_row * indata_length + in]; if (index == BIN_SIZE - 1 || count == end_flag) { for (int i = 0; i < BIN_SIZE; i++) { data += R[i] * sp[i]; } data_output[data_row * outdata_length + out_start] = data; data = 0; out_start += 1; for (int m = 0; m < BIN_SIZE; m++) sp[m] = 0; } count++; } } } } } extern "C" void img2bitcol_gpu(unsigned int *&bin_data, int num, int channel, int input_dim, int kernel_size, int stride, int pad, int output_dim, unsigned int *&pad_input) { clock_t start = clock(); int length; if (channel * kernel_size * kernel_size % BIN_SIZE == 0) length = (channel * kernel_size * kernel_size / BIN_SIZE) * output_dim * output_dim; else length = (channel * kernel_size * kernel_size / BIN_SIZE + 1) * output_dim * output_dim; int block_size = length / (output_dim * output_dim); int output_length = output_dim * output_dim; int input_dim_ = input_dim + 2 * pad; _k_img2bitcol_gpu<<<BLOCKNUM, THREADNUM, 0>>>(bin_data, pad_input, num, block_size, output_length, channel, input_dim_, output_dim, stride,length, kernel_size); cudaThreadSynchronize(); } __global__ void _k_copy_data_gpu(float_t **data_input, float_t **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * 
length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_gpu(float_t **&data, float_t **&out_data, int num, int length, int add) { _k_copy_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, length, add); cudaThreadSynchronize(); } __global__ void _k_copy_data_bin_gpu(unsigned int **data_input, unsigned int **data_output, int num, int length, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start, in_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; in_start = j % length; if (add) { data_output[data_row][out_start] += data_input[data_row][in_start]; } else { data_output[data_row][out_start] = data_input[data_row][in_start]; } } } extern "C" void copy_data_bin_gpu(unsigned int **&data, unsigned int **&out_data, int num, int length, int add) { _k_copy_data_bin_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, length, add); cudaThreadSynchronize(); } __global__ void _k_copy2dest_gpu(float_t **data_input, float_t **index_data, float_t **data_output, int num, int input_dim, int output_dim, int channel, int kernel_size, int stride, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; //the index for the data in kernel int offset_i, offset_j; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; offset_i = startset_i - outset_i * stride; offset_j = startset_j - outset_j * stride; if (index_data[data_row][(outset_i * output_dim + outset_j) * channel + c] == (float_t) (offset_i * kernel_size + offset_j)) { data_output[data_row][data_col] += data_input[data_row][(outset_i * output_dim + outset_j) * channel + c]; } } } } extern "C" void copy2dest_gpu(float_t **&data, float_t **&index_data, int num, int output_dim, int input_dim, int channel, int kernel_size, int stride, float_t **&out_data) { int length = input_dim * input_dim * channel; _k_copy2dest_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, index_data, out_data, num, input_dim, output_dim, channel, kernel_size, stride, length); cudaThreadSynchronize(); } __global__ void _k_copy2mean_gpu(float_t **data_input, float_t **data_output, int num, int channel, int input_dim, int output_dim, int stride, int kernel_size, int pad, int length) { int 
tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; //the set in the input feature map int startset_i, startset_j; //the set in the output feature map int outset_si, outset_sj, outset_i, outset_j; //the count for stride in feature map int count_i, count_j; int pw, ph; int c; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; startset_i = data_col / (channel * input_dim); startset_j = (data_col / channel) % input_dim; c = data_col % channel; outset_si = startset_i / stride; outset_sj = startset_j / stride; if (outset_si >= output_dim) outset_si = output_dim - 1; if (outset_sj >= output_dim) outset_sj = output_dim - 1; count_i = 0; count_j = 0; while (outset_si - (count_i + 1) >= 0 && ((outset_si - (count_i + 1)) * stride) + kernel_size >= startset_i + 1) { count_i++; } while (outset_sj - (count_j + 1) >= 0 && ((outset_sj - (count_j + 1)) * stride) + kernel_size >= startset_j + 1) { count_j++; } //stride for (int mi = 0; mi <= count_i; mi++) for (int mj = 0; mj <= count_j; mj++) { outset_i = outset_si - mi; outset_j = outset_sj - mj; pw = kernel_size; ph = kernel_size; if (outset_i == output_dim - 1) ph = kernel_size - pad; if (outset_j == output_dim - 1) pw = kernel_size - pad; data_output[data_row][data_col] += (data_input[data_row][(outset_i * output_dim + outset_j) * channel + c] / (float_t) (ph * pw)); } } } extern "C" void copy2mean_gpu(float_t **&data, int num, int output_dim, int input_dim, int channel, int kernel_size, int stride, int pad, float_t **&out_data) { int length = input_dim * input_dim * channel; _k_copy2mean_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, out_data, num, channel, input_dim, output_dim, stride, kernel_size, pad, length); cudaThreadSynchronize(); } __global__ void _k_reset_data_gpu(float_t *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_data_gpu(float_t *&data, int num, int length) { _k_reset_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length); cudaThreadSynchronize(); } __global__ void _k_reset_bin_data_gpu(unsigned int *data_input, int num, int length) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_input[j] = 0; } } extern "C" void reset_bin_data_gpu(unsigned int *&data, int num, int length) { _k_reset_bin_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length); cudaThreadSynchronize(); } __global__ void _k_set_data_gpu(float_t **data_input, int num, int length, float_t value) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int out_start; int data_row; for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) { data_row = j / length; out_start = j % length; data_input[data_row][out_start] = value; } } extern "C" void set_data_gpu(float_t **&data, int num, int length, float_t value) { _k_set_data_gpu<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length, value); cudaThreadSynchronize(); }
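In both versions of this file, _k_img2col_gpu unrolls an HWC-layout feature map into per-output-pixel patches of kernel_size * kernel_size * channel values, ordered channel-major, then kernel row, then kernel column; the index arithmetic (border = input_dim - output_dim, in_start = (data_col + (data_col / output_dim) * border) * channel) corresponds to a stride-1 sliding window. A single-image CPU reference for that case, with illustrative names that are not part of this library:

// CPU im2col for one HWC image, stride 1, no padding; mirrors the
// patch ordering ([c][k_row][k_col]) and input indexing of _k_img2col_gpu.
void im2col_hwc_cpu(const float *in, float *out,
                    int input_dim, int channel, int kernel_size) {
	int output_dim = input_dim - kernel_size + 1;
	int block_size = kernel_size * kernel_size * channel;
	for (int orow = 0; orow < output_dim; ++orow)
		for (int ocol = 0; ocol < output_dim; ++ocol) {
			float *dst = out + (orow * output_dim + ocol) * block_size;
			for (int c = 0; c < channel; ++c)
				for (int kr = 0; kr < kernel_size; ++kr)
					for (int kc = 0; kc < kernel_size; ++kc)
						dst[c * kernel_size * kernel_size + kr * kernel_size + kc] =
						    in[((orow + kr) * input_dim + (ocol + kc)) * channel + c];
		}
}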
7856183b46eaf96b03a73dd6be6918fdaf8e08aa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void ori_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int iteration) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; //flattened id int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } __global__ void ori_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int iteration, unsigned int*smid) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; if(threadIdx.x == 0) { smid[blockIdx.y * gridDim.x + blockIdx.x] = 0; unsigned int ret = 0; asm("mov.u32 %0, %smid;" : "=r"(ret)); smid[blockIdx.y * gridDim.x + blockIdx.x] = ret; } for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; //flattened id int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } __global__ void pers_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, unsigned int*smid) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; if(threadIdx.x == 0) { smid[blockIdx.y * gridDim.x + blockIdx.x] = 0; unsigned int ret = 0; asm("mov.u32 %0, %smid;" : "=r"(ret)); smid[blockIdx.y * gridDim.x + blockIdx.x] = ret; } unsigned int block_pos = blockIdx.x; int thread_id_x = threadIdx.x % block_dimension_x; int thread_id_y = threadIdx.x / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += gridDim.x) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.x; int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; 
kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __global__ void pers_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x; int thread_id_x = threadIdx.x % block_dimension_x; int thread_id_y = threadIdx.x / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += gridDim.x) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.x; int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __device__ void mix_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, int thread_step) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x; int thread_id_x = (threadIdx.x - thread_step) % block_dimension_x; int thread_id_y = (threadIdx.x - thread_step) / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += SGEMM_GRID_DIM) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = (threadIdx.x - thread_step); int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory"); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory"); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __device__ void mixx_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, int thread_step) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x + 68 * (thread_step / (block_dimension_x * block_dimension_y)); int thread_id_x = (threadIdx.x - 
thread_step) % block_dimension_x; int thread_id_y = (threadIdx.x - thread_step) / block_dimension_x; int tmp = thread_step / (block_dimension_x * block_dimension_y); __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += 68 * 2) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = (threadIdx.x - thread_step); int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(tmp), "r"(128) : "memory"); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(tmp), "r"(128) : "memory"); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __global__ void new_sgemm( float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration) { if (threadIdx.x < 128) { // mix_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration); mixx_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration, 0); } else { mixx_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration, 128); } }
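pers_sgemm above uses a persistent-block scheme: the launched grid is fixed, and each resident block sweeps a flattened range of virtual block positions until grid_dimension_x * grid_dimension_y work items are exhausted. mixx_sgemm additionally splits a (presumably 256-thread) block into two independent 128-thread halves that synchronize with named barriers (bar.sync id, 128) instead of __syncthreads(). A stripped-down sketch of the block-sweeping pattern, with a placeholder body rather than the sgemm computation:

__global__ void persistent_sweep(int grid_dimension_x, int grid_dimension_y) {
	for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) {
		if (block_pos >= (unsigned int)(grid_dimension_x * grid_dimension_y)) {
			return;                                     // every virtual block has been processed
		}
		int block_id_x = block_pos % grid_dimension_x;  // recover the 2D virtual block index
		int block_id_y = block_pos / grid_dimension_x;
		// ... the per-block tile computation would run here, driven by block_id_x / block_id_y ...
		(void)block_id_x;
		(void)block_id_y;
	}
}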
7856183b46eaf96b03a73dd6be6918fdaf8e08aa.cu
__global__ void ori_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int iteration) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; //flattened id int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } __global__ void ori_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int iteration, unsigned int*smid) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; if(threadIdx.x == 0) { smid[blockIdx.y * gridDim.x + blockIdx.x] = 0; unsigned int ret = 0; asm("mov.u32 %0, %smid;" : "=r"(ret)); smid[blockIdx.y * gridDim.x + blockIdx.x] = ret; } for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.y * blockDim.x + threadIdx.x; //flattened id int m = blockIdx.x * TILE_M + mid; int n = blockIdx.y * TILE_N + threadIdx.x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[threadIdx.y][threadIdx.x] = B[n + (i + threadIdx.y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * blockIdx.y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } __global__ void pers_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, unsigned int*smid) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; if(threadIdx.x == 0) { smid[blockIdx.y * gridDim.x + blockIdx.x] = 0; unsigned int ret = 0; asm("mov.u32 %0, %smid;" : "=r"(ret)); smid[blockIdx.y * gridDim.x + blockIdx.x] = ret; } unsigned int block_pos = blockIdx.x; int thread_id_x = threadIdx.x % block_dimension_x; int thread_id_y = threadIdx.x / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += gridDim.x) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.x; int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * 
block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __global__ void pers_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x; int thread_id_x = threadIdx.x % block_dimension_x; int thread_id_y = threadIdx.x / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += gridDim.x) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = threadIdx.x; int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; __syncthreads(); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } __syncthreads(); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __device__ void mix_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, int thread_step) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x; int thread_id_x = (threadIdx.x - thread_step) % block_dimension_x; int thread_id_y = (threadIdx.x - thread_step) / block_dimension_x; __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += SGEMM_GRID_DIM) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = (threadIdx.x - thread_step); int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory"); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory"); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __device__ void mixx_sgemm(float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration, int thread_step) { int lda = NORMAL_M; int ldb = NORMAL_N; int ldc = NORMAL_M; float alpha = 2.0f; float beta = 2.0f; unsigned int block_pos = blockIdx.x + 68 * (thread_step / (block_dimension_x * block_dimension_y)); int thread_id_x = (threadIdx.x - thread_step) % block_dimension_x; int thread_id_y = (threadIdx.x - thread_step) 
/ block_dimension_x; int tmp = thread_step / (block_dimension_x * block_dimension_y); __shared__ float b_s[TILE_TB_HEIGHT][TILE_N]; for (;; block_pos += 68 * 2) { if (block_pos >= grid_dimension_x * grid_dimension_y) { return; } int block_id_x = block_pos % grid_dimension_x; int block_id_y = block_pos / grid_dimension_x; for (int loop = 0; loop < iteration; loop++) { // Partial results float c[TILE_N]; for (int i = 0; i < TILE_N; i++) c[i] = 0.0f; int mid = (threadIdx.x - thread_step); int m = block_id_x * TILE_M + mid; int n = block_id_y * TILE_N + thread_id_x; for (int i = 0; i < NORMAL_K; i += TILE_TB_HEIGHT) { float a; b_s[thread_id_y][thread_id_x] = B[n + (i + thread_id_y) * ldb]; // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(tmp), "r"(128) : "memory"); for (int j = 0; j < TILE_TB_HEIGHT; j++) { a = A[m + (i + j) * lda]; for (int kk = 0; kk < TILE_N; kk++) c[kk] += a * b_s[j][kk]; } // __syncthreads(); asm volatile("bar.sync %0, %1;" : : "r"(tmp), "r"(128) : "memory"); } int t = ldc * block_id_y * TILE_N + m; for (int i = 0; i < TILE_N; i++) { C[t + i * ldc] = C[t + i * ldc] * beta + alpha * c[i]; } } } } __global__ void new_sgemm( float *A, float *B, float *C, int NORMAL_M, int NORMAL_N, int NORMAL_K, int grid_dimension_x, int grid_dimension_y, int block_dimension_x, int block_dimension_y, int iteration) { if (threadIdx.x < 128) { // mix_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration); mixx_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration, 0); } else { mixx_sgemm(A, B, C, NORMAL_M, NORMAL_N, NORMAL_K, grid_dimension_x, grid_dimension_y, block_dimension_x, block_dimension_y, iteration, 128); } }
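new_sgemm above splits a single 256-thread block into two logical 128-thread workers and swaps __syncthreads() for PTX bar.sync on a named barrier, so each half waits only for its own threads. A minimal, self-contained sketch of that pattern under stated assumptions (split_worker and split_kernel are illustrative names, the block is assumed to be launched with 256 threads, n is assumed to be a multiple of 256, and the per-element work is just a placeholder):

// Sketch only: two independent 128-thread workers inside one 256-thread block.
// Each worker stages data through its own shared-memory tile and synchronizes
// on its own named barrier, mirroring the thread_step / bar.sync idea above.
__device__ void split_worker(float *out, const float *in, int n,
                             int barrier_id, int thread_step) {
    __shared__ float stage[2][128];
    int lane = threadIdx.x - thread_step;   // 0..127 within this worker
    float *buf = stage[barrier_id];         // private tile per worker
    for (int base = barrier_id * 128; base < n; base += 256) {
        buf[lane] = in[base + lane];
        // bar.sync <id>, <count>: only the 128 threads of this worker wait here.
        asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(128) : "memory");
        out[base + lane] = 2.0f * buf[lane];
        asm volatile("bar.sync %0, %1;" : : "r"(barrier_id), "r"(128) : "memory");
    }
}

__global__ void split_kernel(float *out, const float *in, int n) {
    if (threadIdx.x < 128) split_worker(out, in, n, /*barrier_id=*/0, /*thread_step=*/0);
    else                   split_worker(out, in, n, /*barrier_id=*/1, /*thread_step=*/128);
}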
dcfc1434912cb1ad257c810d426b6eb5b7a97ea7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <iomanip> using namespace std; void Error(hipError_t error) { if (error != hipSuccess){ cout << "ERROR:" << hipGetErrorString(error) << endl; exit(0); } } __global__ void sqr_items_vectors(double* a, double* result, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n) { result[tid] = a[tid]*a[tid]; tid += blockDim.x*gridDim.x; } } int main() { int n, size; double *a, *result; double *gpu_a, *gpu_result; cin >> n; size = sizeof(double) * n; a = (double*)malloc(size); result = (double*)malloc(size); for (int i = 0; i < n; ++i) { cin >> a[i]; } Error(hipMalloc(&gpu_a, size)); Error(hipMalloc(&gpu_result, size)); Error(hipMemcpy(gpu_a, a, size, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( sqr_items_vectors), dim3(256), dim3(256), 0, 0, gpu_a, gpu_result, n); Error(hipMemcpy(result, gpu_result, size, hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { cout << scientific << setprecision(10) << result[i] << " "; } cout << endl; Error(hipFree(gpu_a)); Error(hipFree(gpu_result)); free(a); free(result); return 0; }
dcfc1434912cb1ad257c810d426b6eb5b7a97ea7.cu
#include <iostream> #include <iomanip> using namespace std; void Error(cudaError_t error) { if (error != cudaSuccess){ cout << "ERROR:" << cudaGetErrorString(error) << endl; exit(0); } } __global__ void sqr_items_vectors(double* a, double* result, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n) { result[tid] = a[tid]*a[tid]; tid += blockDim.x*gridDim.x; } } int main() { int n, size; double *a, *result; double *gpu_a, *gpu_result; cin >> n; size = sizeof(double) * n; a = (double*)malloc(size); result = (double*)malloc(size); for (int i = 0; i < n; ++i) { cin >> a[i]; } Error(cudaMalloc(&gpu_a, size)); Error(cudaMalloc(&gpu_result, size)); Error(cudaMemcpy(gpu_a, a, size, cudaMemcpyHostToDevice)); sqr_items_vectors<<<256, 256>>>(gpu_a, gpu_result, n); Error(cudaMemcpy(result, gpu_result, size, cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { cout << scientific << setprecision(10) << result[i] << " "; } cout << endl; Error(cudaFree(gpu_a)); Error(cudaFree(gpu_result)); free(a); free(result); return 0; }
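One thing neither version of the squaring program checks is the kernel launch itself: Error() wraps the memory calls, but a bad launch configuration or a fault inside sqr_items_vectors would only surface indirectly at the final memcpy. A small sketch of how the launch site in main() could be guarded with the file's own Error() helper (shown for the CUDA version; the HIP twin would use hipGetLastError() and hipDeviceSynchronize(); the rest of the program is assumed unchanged):

// Sketch: check both the launch and the kernel execution before copying back.
sqr_items_vectors<<<256, 256>>>(gpu_a, gpu_result, n);
Error(cudaGetLastError());        // catches launch-configuration errors
Error(cudaDeviceSynchronize());   // surfaces errors raised while the kernel ran
Error(cudaMemcpy(result, gpu_result, size, cudaMemcpyDeviceToHost));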
085bf2bf33ea69567fd71b7b300888e433f6ae32.hip
// !!! This is a file automatically generated by hipify!!! #define __BSD_SOURCE #include <math.h> // fabsf #include <stdlib.h> // malloc/free #include <stdio.h> // printf #include <time.h> // time #include <sys/time.h> // gettimeofday, timersub #include <hip/hip_runtime.h> // CUDA API #include <cutil_inline.h> // Error-checking helper functions #define N 1024 // block dimensions #define BLOCK_WIDTH 64 #define BLOCK_HEIGHT 8 // index of a two-dimensional coordinate of an // NxN matrix in the array that stores it __host__ __device__ __inline__ uint index(uint y, uint x) { return x + y * N; } // trivial multiplication of two NxN matrices // assumes N is divisible by the block dimensions // to keep the code simple __global__ void mm_slow(const float * a, const float * b, float * c) { uint x = threadIdx.x + blockIdx.x * blockDim.x; uint y = threadIdx.y + blockIdx.y * blockDim.y; float sum = 0.0f; for (uint i = 0; i < N; ++i) { sum += a[index(y, i)] * b[index(i, x)]; } c[index(y, x)] = sum; } // trivial ikj reference implementation on the CPU // with some luck the compiler vectorizes it static void mm_cpu(const float * a, const float * b, float * c) { for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { c[index(y,x)] = 0.0f; } for (uint k = 0; k < N; ++k) { for (uint x = 0; x < N; ++x) { c[index(y, x)] += a[index(y, k)] * b[index(k, x)]; } } } } // compare two results and list significant differences static void check_result(const float * reference, const float * other) { for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { if (fabsf(reference[index(y, x)] - other[index(y, x)]) > 0.001f) { printf("y:%u x:%u reference:%f result:%f\n", y, x, reference[index(y, x)], other[index(y, x)]); } } } } int main(int argc, char *argv[]) { // allocate host memory size_t matrix_size = N * N * sizeof(float); float * host_a = (float *) malloc(matrix_size); float * host_b = (float *) malloc(matrix_size); float * host_c = (float *) malloc(matrix_size); float * host_c_reference = (float *) malloc(matrix_size); // fill A and B with random numbers srand(time(0)); for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { host_a[index(y, x)] = (float) rand() / RAND_MAX; host_b[index(y, x)] = (float) rand() / RAND_MAX; } } // run on the CPU and measure the time struct timeval start, finish, elapsed; double cpusecs; gettimeofday(&start, NULL); mm_cpu(host_a, host_b, host_c_reference); gettimeofday(&finish, NULL); timersub(&finish, &start, &elapsed); cpusecs = elapsed.tv_sec + elapsed.tv_usec / 1000000.0; printf("CPU time: %f\n", cpusecs); // allocate GPU memory for A, B and C float * dev_a; float * dev_b; float * dev_c; cutilSafeCall(hipMalloc((void **) &dev_a, matrix_size)); cutilSafeCall(hipMalloc((void **) &dev_b, matrix_size)); cutilSafeCall(hipMalloc((void **) &dev_c, matrix_size)); // copy A and B to the device cutilSafeCall(hipMemcpy(dev_a, host_a, matrix_size, hipMemcpyDefault)); cutilSafeCall(hipMemcpy(dev_b, host_b, matrix_size, hipMemcpyDefault)); // configure the grid and launch the kernel dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 grid(N/block.x, N/block.y); hipLaunchKernelGGL(( mm_slow), dim3(grid), dim3(block), 0, 0, dev_a, dev_b, dev_c); // wait for it to finish cutilSafeCall(hipDeviceSynchronize()); // Copy data back to the host and check the validity of the result cutilSafeCall(hipMemcpy(host_c, dev_c, matrix_size, hipMemcpyDefault)); check_result(host_c_reference, host_c); // free memory free(host_a); free(host_b); free(host_c); 
free(host_c_reference); cutilSafeCall(hipFree(dev_a)); cutilSafeCall(hipFree(dev_b)); cutilSafeCall(hipFree(dev_c)); return 0; }
085bf2bf33ea69567fd71b7b300888e433f6ae32.cu
#define __BSD_SOURCE #include <math.h> // fabsf #include <stdlib.h> // malloc/free #include <stdio.h> // printf #include <time.h> // time #include <sys/time.h> // gettimeofday, timersub #include <cuda.h> // CUDA API #include <cutil_inline.h> // Error-checking helper functions #define N 1024 // block dimensions #define BLOCK_WIDTH 64 #define BLOCK_HEIGHT 8 // index of a two-dimensional coordinate of an // NxN matrix in the array that stores it __host__ __device__ __inline__ uint index(uint y, uint x) { return x + y * N; } // trivial multiplication of two NxN matrices // assumes N is divisible by the block dimensions // to keep the code simple __global__ void mm_slow(const float * a, const float * b, float * c) { uint x = threadIdx.x + blockIdx.x * blockDim.x; uint y = threadIdx.y + blockIdx.y * blockDim.y; float sum = 0.0f; for (uint i = 0; i < N; ++i) { sum += a[index(y, i)] * b[index(i, x)]; } c[index(y, x)] = sum; } // trivial ikj reference implementation on the CPU // with some luck the compiler vectorizes it static void mm_cpu(const float * a, const float * b, float * c) { for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { c[index(y,x)] = 0.0f; } for (uint k = 0; k < N; ++k) { for (uint x = 0; x < N; ++x) { c[index(y, x)] += a[index(y, k)] * b[index(k, x)]; } } } } // compare two results and list significant differences static void check_result(const float * reference, const float * other) { for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { if (fabsf(reference[index(y, x)] - other[index(y, x)]) > 0.001f) { printf("y:%u x:%u reference:%f result:%f\n", y, x, reference[index(y, x)], other[index(y, x)]); } } } } int main(int argc, char *argv[]) { // allocate host memory size_t matrix_size = N * N * sizeof(float); float * host_a = (float *) malloc(matrix_size); float * host_b = (float *) malloc(matrix_size); float * host_c = (float *) malloc(matrix_size); float * host_c_reference = (float *) malloc(matrix_size); // fill A and B with random numbers srand(time(0)); for (uint y = 0; y < N; ++y) { for (uint x = 0; x < N; ++x) { host_a[index(y, x)] = (float) rand() / RAND_MAX; host_b[index(y, x)] = (float) rand() / RAND_MAX; } } // run on the CPU and measure the time struct timeval start, finish, elapsed; double cpusecs; gettimeofday(&start, NULL); mm_cpu(host_a, host_b, host_c_reference); gettimeofday(&finish, NULL); timersub(&finish, &start, &elapsed); cpusecs = elapsed.tv_sec + elapsed.tv_usec / 1000000.0; printf("CPU time: %f\n", cpusecs); // allocate GPU memory for A, B and C float * dev_a; float * dev_b; float * dev_c; cutilSafeCall(cudaMalloc((void **) &dev_a, matrix_size)); cutilSafeCall(cudaMalloc((void **) &dev_b, matrix_size)); cutilSafeCall(cudaMalloc((void **) &dev_c, matrix_size)); // copy A and B to the device cutilSafeCall(cudaMemcpy(dev_a, host_a, matrix_size, cudaMemcpyDefault)); cutilSafeCall(cudaMemcpy(dev_b, host_b, matrix_size, cudaMemcpyDefault)); // configure the grid and launch the kernel dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT); dim3 grid(N/block.x, N/block.y); mm_slow<<<grid, block>>>(dev_a, dev_b, dev_c); // wait for it to finish cutilSafeCall(cudaDeviceSynchronize()); // Copy data back to the host and check the validity of the result cutilSafeCall(cudaMemcpy(host_c, dev_c, matrix_size, cudaMemcpyDefault)); check_result(host_c_reference, host_c); // free memory free(host_a); free(host_b); free(host_c); free(host_c_reference); cutilSafeCall(cudaFree(dev_a)); cutilSafeCall(cudaFree(dev_b)); 
cutilSafeCall(cudaFree(dev_c)); return 0; }
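The comment on mm_slow notes that it assumes N is divisible by the block dimensions. As a hedged sketch, the same kernel could guard its coordinates and the grid could be rounded up, so that assumption is no longer needed (mm_slow_guarded is an illustrative name; N, index() and the BLOCK_* macros are the ones defined in the file above):

// Sketch: identical multiplication, but out-of-range threads simply return,
// so N does not have to be a multiple of the block dimensions.
__global__ void mm_slow_guarded(const float * a, const float * b, float * c) {
    uint x = threadIdx.x + blockIdx.x * blockDim.x;
    uint y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= N || y >= N) return;
    float sum = 0.0f;
    for (uint i = 0; i < N; ++i) {
        sum += a[index(y, i)] * b[index(i, x)];
    }
    c[index(y, x)] = sum;
}

// Launch side: round the grid up instead of dividing exactly.
//   dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
//   dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
//   mm_slow_guarded<<<grid, block>>>(dev_a, dev_b, dev_c);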
b9d886e2ed1e9b27b1c7bb24618e0274dfc330c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * This is a simple test showing huge access speed gap * between aligned and misaligned structures * (those having/missing __align__ keyword). * It measures per-element copy throughput for * aligned and misaligned structures on * big chunks of data. 
*/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Misaligned types //////////////////////////////////////////////////////////////////////////////// typedef unsigned char uint8; typedef unsigned short int uint16; typedef struct{ unsigned char r, g, b, a; } RGBA8_misaligned; typedef struct{ unsigned int l, a; } LA32_misaligned; typedef struct{ unsigned int r, g, b; } RGB32_misaligned; typedef struct{ unsigned int r, g, b, a; } RGBA32_misaligned; //////////////////////////////////////////////////////////////////////////////// // Aligned types //////////////////////////////////////////////////////////////////////////////// typedef struct __align__(4){ unsigned char r, g, b, a; } RGBA8; typedef unsigned int I32; typedef struct __align__(8){ unsigned int l, a; } LA32; typedef struct __align__(16){ unsigned int r, g, b; } RGB32; typedef struct __align__(16){ unsigned int r, g, b, a; } RGBA32; //////////////////////////////////////////////////////////////////////////////// // Because G80 class hardware natively supports global memory operations // only with data elements of 4, 8 and 16 bytes, if structure size // exceeds 16 bytes, it can't be efficiently read or written, // since more than one global memory non-coalescable load/store instructions // will be generated, even if __align__ option is supplied. // "Structure of arrays" storage strategy offers best performance // in general case. See section 5.1.2 of the Programming Guide. //////////////////////////////////////////////////////////////////////////////// typedef struct __align__(16){ RGBA32 c1, c2; } RGBA32_2; //////////////////////////////////////////////////////////////////////////////// // Common host and device functions //////////////////////////////////////////////////////////////////////////////// //Round a / b to nearest higher integer value int iDivUp(int a, int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b int iAlignUp(int a, int b){ return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int iAlignDown(int a, int b){ return a - a % b; } //////////////////////////////////////////////////////////////////////////////// // Simple CUDA kernel. // Copy is carried out on per-element basis, // so it's not per-byte in case of padded structures. //////////////////////////////////////////////////////////////////////////////// template<class TData> __global__ void testKernel( TData *d_odata, TData *d_idata, int numElements ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for(int pos = tid; pos < numElements; pos += numThreads) d_odata[pos] = d_idata[pos]; } //////////////////////////////////////////////////////////////////////////////// // Validation routine for simple copy kernel. // We must know "packed" size of TData (number_of_fields * sizeof(simple_type)) // and compare only these "packed" parts of the structure, // containig actual user data. The compiler behavior with padding bytes // is undefined, since padding is merely a placeholder // and doesn't contain any user data. 
//////////////////////////////////////////////////////////////////////////////// template<class TData> int testCPU( TData *h_odata, TData *h_idata, int numElements, int packedElementSize ){ for(int pos = 0; pos < numElements; pos++){ TData src = h_idata[pos]; TData dst = h_odata[pos]; for(int i = 0; i < packedElementSize; i++) if( ((char *)&src)[i] != ((char *)&dst)[i] ) return 0; } return 1; } //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// //Memory chunk size in bytes. Reused for test #ifdef __DEVICE_EMULATION__ const int MEM_SIZE = 16000000; const int NUM_ITERATIONS = 1; #else const int MEM_SIZE = 50000000; const int NUM_ITERATIONS = 32; #endif //GPU input and output data unsigned char *d_idata, *d_odata; //CPU input data and instance of GPU output data unsigned char *h_idataCPU, *h_odataGPU; unsigned int hTimer; template<class TData> void runTest(int packedElementSize){ const int totalMemSizeAligned = iAlignDown(MEM_SIZE, sizeof(TData)); const int numElements = iDivDown(MEM_SIZE, sizeof(TData)); //Clean output buffer before current test cutilSafeCall( hipMemset(d_odata, 0, MEM_SIZE) ); //Run test cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); for(int i = 0; i < NUM_ITERATIONS; i++){ hipLaunchKernelGGL(( testKernel<TData>), dim3(64), dim3(256), 0, 0, (TData *)d_odata, (TData *)d_idata, numElements ); cutilCheckMsg("testKernel() execution failed\n"); } cutilSafeCall( hipDeviceSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); double gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS; printf( "Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime, (double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0) ); //Read back GPU results and run validation cutilSafeCall( hipMemcpy(h_odataGPU, d_odata, MEM_SIZE, hipMemcpyDeviceToHost) ); int flag = testCPU( (TData *)h_odataGPU, (TData *)h_idataCPU, numElements, packedElementSize ); printf(flag ? 
"TEST PASSED\n" : "TEST FAILED\n" ); } int main(int argc, char **argv){ int i; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else hipSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Allocating memory...\n"); h_idataCPU = (unsigned char *)malloc(MEM_SIZE); h_odataGPU = (unsigned char *)malloc(MEM_SIZE); cutilSafeCall(hipMalloc((void **)&d_idata, MEM_SIZE)); cutilSafeCall(hipMalloc((void **)&d_odata, MEM_SIZE)); printf("Generating host input data array...\n"); for(i = 0; i < MEM_SIZE; i++) h_idataCPU[i] = (i & 0xFF) + 1; printf("Uploading input data to GPU memory...\n"); cutilSafeCall(hipMemcpy(d_idata, h_idataCPU, MEM_SIZE, hipMemcpyHostToDevice) ); printf("Testing misaligned types...\n"); printf("uint8...\n"); runTest<uint8>(1); printf("uint16...\n"); runTest<uint16>(2); printf("RGBA8_misaligned...\n"); runTest<RGBA8_misaligned>(4); printf("LA32_misaligned...\n"); runTest<LA32_misaligned>(8); printf("RGB32_misaligned...\n"); runTest<RGB32_misaligned>(12); printf("RGBA32_misaligned...\n"); runTest<RGBA32_misaligned>(16); printf("Testing aligned types...\n"); printf("RGBA8...\n"); runTest<RGBA8>(4); printf("I32...\n"); runTest<I32>(4); printf("LA32...\n"); runTest<LA32>(8); printf("RGB32...\n"); runTest<RGB32>(12); printf("RGBA32...\n"); runTest<RGBA32>(16); printf("RGBA32_2...\n"); runTest<RGBA32_2>(32); printf("Shutting down...\n"); cutilSafeCall(hipFree(d_idata)); cutilSafeCall(hipFree(d_odata)); free(h_odataGPU); free(h_idataCPU); cutilCheckError( cutDeleteTimer(hTimer) ); cutilExit(argc, argv); hipDeviceReset(); }
b9d886e2ed1e9b27b1c7bb24618e0274dfc330c5.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* * This is a simple test showing huge access speed gap * between aligned and misaligned structures * (those having/missing __align__ keyword). * It measures per-element copy throughput for * aligned and misaligned structures on * big chunks of data. */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <cutil_inline.h> //////////////////////////////////////////////////////////////////////////////// // Misaligned types //////////////////////////////////////////////////////////////////////////////// typedef unsigned char uint8; typedef unsigned short int uint16; typedef struct{ unsigned char r, g, b, a; } RGBA8_misaligned; typedef struct{ unsigned int l, a; } LA32_misaligned; typedef struct{ unsigned int r, g, b; } RGB32_misaligned; typedef struct{ unsigned int r, g, b, a; } RGBA32_misaligned; //////////////////////////////////////////////////////////////////////////////// // Aligned types //////////////////////////////////////////////////////////////////////////////// typedef struct __align__(4){ unsigned char r, g, b, a; } RGBA8; typedef unsigned int I32; typedef struct __align__(8){ unsigned int l, a; } LA32; typedef struct __align__(16){ unsigned int r, g, b; } RGB32; typedef struct __align__(16){ unsigned int r, g, b, a; } RGBA32; //////////////////////////////////////////////////////////////////////////////// // Because G80 class hardware natively supports global memory operations // only with data elements of 4, 8 and 16 bytes, if structure size // exceeds 16 bytes, it can't be efficiently read or written, // since more than one global memory non-coalescable load/store instructions // will be generated, even if __align__ option is supplied. 
// "Structure of arrays" storage strategy offers best performance // in general case. See section 5.1.2 of the Programming Guide. //////////////////////////////////////////////////////////////////////////////// typedef struct __align__(16){ RGBA32 c1, c2; } RGBA32_2; //////////////////////////////////////////////////////////////////////////////// // Common host and device functions //////////////////////////////////////////////////////////////////////////////// //Round a / b to nearest higher integer value int iDivUp(int a, int b){ return (a % b != 0) ? (a / b + 1) : (a / b); } //Round a / b to nearest lower integer value int iDivDown(int a, int b){ return a / b; } //Align a to nearest higher multiple of b int iAlignUp(int a, int b){ return (a % b != 0) ? (a - a % b + b) : a; } //Align a to nearest lower multiple of b int iAlignDown(int a, int b){ return a - a % b; } //////////////////////////////////////////////////////////////////////////////// // Simple CUDA kernel. // Copy is carried out on per-element basis, // so it's not per-byte in case of padded structures. //////////////////////////////////////////////////////////////////////////////// template<class TData> __global__ void testKernel( TData *d_odata, TData *d_idata, int numElements ){ const int tid = blockDim.x * blockIdx.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; for(int pos = tid; pos < numElements; pos += numThreads) d_odata[pos] = d_idata[pos]; } //////////////////////////////////////////////////////////////////////////////// // Validation routine for simple copy kernel. // We must know "packed" size of TData (number_of_fields * sizeof(simple_type)) // and compare only these "packed" parts of the structure, // containig actual user data. The compiler behavior with padding bytes // is undefined, since padding is merely a placeholder // and doesn't contain any user data. //////////////////////////////////////////////////////////////////////////////// template<class TData> int testCPU( TData *h_odata, TData *h_idata, int numElements, int packedElementSize ){ for(int pos = 0; pos < numElements; pos++){ TData src = h_idata[pos]; TData dst = h_odata[pos]; for(int i = 0; i < packedElementSize; i++) if( ((char *)&src)[i] != ((char *)&dst)[i] ) return 0; } return 1; } //////////////////////////////////////////////////////////////////////////////// // Data configuration //////////////////////////////////////////////////////////////////////////////// //Memory chunk size in bytes. 
Reused for test #ifdef __DEVICE_EMULATION__ const int MEM_SIZE = 16000000; const int NUM_ITERATIONS = 1; #else const int MEM_SIZE = 50000000; const int NUM_ITERATIONS = 32; #endif //GPU input and output data unsigned char *d_idata, *d_odata; //CPU input data and instance of GPU output data unsigned char *h_idataCPU, *h_odataGPU; unsigned int hTimer; template<class TData> void runTest(int packedElementSize){ const int totalMemSizeAligned = iAlignDown(MEM_SIZE, sizeof(TData)); const int numElements = iDivDown(MEM_SIZE, sizeof(TData)); //Clean output buffer before current test cutilSafeCall( cudaMemset(d_odata, 0, MEM_SIZE) ); //Run test cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutResetTimer(hTimer) ); cutilCheckError( cutStartTimer(hTimer) ); for(int i = 0; i < NUM_ITERATIONS; i++){ testKernel<TData><<<64, 256>>>( (TData *)d_odata, (TData *)d_idata, numElements ); cutilCheckMsg("testKernel() execution failed\n"); } cutilSafeCall( cudaThreadSynchronize() ); cutilCheckError( cutStopTimer(hTimer) ); double gpuTime = cutGetTimerValue(hTimer) / NUM_ITERATIONS; printf( "Avg. time: %f ms / Copy throughput: %f GB/s.\n", gpuTime, (double)totalMemSizeAligned / (gpuTime * 0.001 * 1073741824.0) ); //Read back GPU results and run validation cutilSafeCall( cudaMemcpy(h_odataGPU, d_odata, MEM_SIZE, cudaMemcpyDeviceToHost) ); int flag = testCPU( (TData *)h_odataGPU, (TData *)h_idataCPU, numElements, packedElementSize ); printf(flag ? "TEST PASSED\n" : "TEST FAILED\n" ); } int main(int argc, char **argv){ int i; if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) cutilDeviceInit(argc, argv); else cudaSetDevice( cutGetMaxGflopsDeviceId() ); cutilCheckError( cutCreateTimer(&hTimer) ); printf("Allocating memory...\n"); h_idataCPU = (unsigned char *)malloc(MEM_SIZE); h_odataGPU = (unsigned char *)malloc(MEM_SIZE); cutilSafeCall(cudaMalloc((void **)&d_idata, MEM_SIZE)); cutilSafeCall(cudaMalloc((void **)&d_odata, MEM_SIZE)); printf("Generating host input data array...\n"); for(i = 0; i < MEM_SIZE; i++) h_idataCPU[i] = (i & 0xFF) + 1; printf("Uploading input data to GPU memory...\n"); cutilSafeCall(cudaMemcpy(d_idata, h_idataCPU, MEM_SIZE, cudaMemcpyHostToDevice) ); printf("Testing misaligned types...\n"); printf("uint8...\n"); runTest<uint8>(1); printf("uint16...\n"); runTest<uint16>(2); printf("RGBA8_misaligned...\n"); runTest<RGBA8_misaligned>(4); printf("LA32_misaligned...\n"); runTest<LA32_misaligned>(8); printf("RGB32_misaligned...\n"); runTest<RGB32_misaligned>(12); printf("RGBA32_misaligned...\n"); runTest<RGBA32_misaligned>(16); printf("Testing aligned types...\n"); printf("RGBA8...\n"); runTest<RGBA8>(4); printf("I32...\n"); runTest<I32>(4); printf("LA32...\n"); runTest<LA32>(8); printf("RGB32...\n"); runTest<RGB32>(12); printf("RGBA32...\n"); runTest<RGBA32>(16); printf("RGBA32_2...\n"); runTest<RGBA32_2>(32); printf("Shutting down...\n"); cutilSafeCall(cudaFree(d_idata)); cutilSafeCall(cudaFree(d_odata)); free(h_odataGPU); free(h_idataCPU); cutilCheckError( cutDeleteTimer(hTimer) ); cutilExit(argc, argv); cudaThreadExit(); }
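The comments in this benchmark point out that elements larger than 16 bytes (such as RGBA32_2) cannot be moved with a single coalesced load or store, and that a structure-of-arrays layout is the usual remedy. A minimal sketch of what that layout could look like for the copy test (RGBA32_SoA and soaCopyKernel are illustrative names, not part of the SDK sample):

// Sketch: structure-of-arrays storage for the 16-byte RGBA32 element.
// Every field lives in its own contiguous array, so each access in the
// copy loop is an independent, fully coalesced 4-byte load or store.
struct RGBA32_SoA {
    unsigned int *r, *g, *b, *a;   // one device array per field, numElements long
};

__global__ void soaCopyKernel(RGBA32_SoA dst, RGBA32_SoA src, int numElements) {
    const int tid        = blockDim.x * blockIdx.x + threadIdx.x;
    const int numThreads = blockDim.x * gridDim.x;
    for (int pos = tid; pos < numElements; pos += numThreads) {
        dst.r[pos] = src.r[pos];
        dst.g[pos] = src.g[pos];
        dst.b[pos] = src.b[pos];
        dst.a[pos] = src.a[pos];
    }
}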
0b15d8e0731c417525e60f333bf1e1436bf576cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorIndex.hip" #else #include "ATen/hip/HIPContext.h" // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(!(THCTensor_(numel)(state, src) == 0 && THCudaLongTensor_numel(state, index) != 0), 2, "tried to take from an empty tensor"); THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL); dispatchTakePut<scalar_t, 
TensorTakeOp>(state, src, dst, index); } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::hip::par(thrustAlloc).on(c10::hip::getCurrentHIPStreamMasqueradingAsCUDA()), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = THCTensor_(nElement)(state, dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { at::NoNamesGuard guard; THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "indexSelect not suppported with BFloat16"); #else 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, indices)); dim = at::maybe_wrap_dim(dim, src); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimensionLegacyNoScalars)(state, src); hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, indices) <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); std::vector<int64_t> newSize = src->sizes().vec(); if (src->dim() > 0) { newSize[dim] = numIndices; } THCTensor_(resize)(state, dst, newSize, {}); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); if (dstTotalSize == 0) { return; } int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t srcSelectDimSize = THCTensor_(sizeLegacyNoScalars)(state, src, dim); ptrdiff_t sliceSize = dstTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ hipLaunchKernelGGL(( indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM>) \ , dim3(smallIndexGrid), dim3(smallIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, static_cast<TYPE>(sliceSize), \ srcSelectDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ hipLaunchKernelGGL(( indexSelectLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR>) \ , dim3(largeIndexGrid), dim3(largeIndexBlock), 0, stream, \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, static_cast<TYPE>(dstTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? 
sliceSize : numIndices), \ srcSelectDimSize); dim3 smallIndexGrid(::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } #endif
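indexShouldBeMajor above picks between the index-major and elementInSlice-major kernels purely by comparing strides. A toy host-side sketch of that decision for a plain row-major H x W tensor (the function and argument names here are illustrative, not THC's TensorInfo API):

// Sketch of the stride heuristic: slicing a row-major H x W tensor along
// dim 0 gives sliceStride = W while the inner dim has stride 1 < W, so the
// index-major kernel is chosen; slicing along dim 1 gives sliceStride = 1,
// nothing inside the slice is smaller, so the elementInSlice-major kernel wins.
static bool toyIndexShouldBeMajor(const int *sizes, const int *strides,
                                  int dims, int sliceDim) {
    int sliceStride = strides[sliceDim];
    for (int i = 0; i < dims; ++i) {
        if (i != sliceDim && sizes[i] > 1 && strides[i] < sliceStride) {
            return true;   // some dimension inside the slice is more contiguous
        }
    }
    return false;
}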
0b15d8e0731c417525e60f333bf1e1436bf576cd.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorIndex.cu" #else #include "ATen/cuda/CUDAContext.h" // Check tensor dimensions for index operations, and return the slice size. // src can be nullptr in case of indexFill: in that case it is ignored. static ptrdiff_t THCTensor_(getSliceSize)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *index, THCTensor *src) { int dstDims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); int srcDims = (src == nullptr) ? dstDims : THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) == 1, 4, "expecting vector of indices"); THArgCheck(dim >= 0 && dim < dstDims, 2, "Indexing dim is out of bounds"); ptrdiff_t dstSliceSize = 1; for (int d = 0; d < dstDims; d++) { if (d != dim) { dstSliceSize *= THTensor_sizeLegacyNoScalars(dst, d); } } if (src == nullptr) return dstSliceSize; THArgCheck(dim < srcDims, 3, "Indexing dim is out of bounds"); THArgCheck(THCudaLongTensor_nElement(state, index) == THTensor_sizeLegacyNoScalars(src, dim), 4, "length of src.size[dim] is not equal to length of indices"); ptrdiff_t srcSliceSize = 1; bool mismatch = false; if (dstDims != srcDims) mismatch = true; for (int d = 0; d < srcDims; d++) { if (d != dim) { srcSliceSize *= THTensor_sizeLegacyNoScalars(src, d); if (!mismatch && THTensor_sizeLegacyNoScalars(dst, d) != THTensor_sizeLegacyNoScalars(src, d)) mismatch = true; } } THArgCheck(dstSliceSize == srcSliceSize, 2, "Source/destination tensor have different slice sizes (%ld vs %ld)", dstSliceSize, srcSliceSize); if (mismatch) { static bool warningShown = false; if (!warningShown) { warningShown = true; fprintf(stderr, "Warning: source/destination slices have same size but different " "shape for an index operation. This behavior is deprecated.\n"); } } return dstSliceSize; } // Compare the stride between adjacent slices (sliceStride) with strides in the // other dimensions (i.e., strides *inside* each slice). // // - Returns true if some dimension inside the slice has lower stride than // sliceStride. The simplest example is a 2-D contiguous tensor with sliceDim // == 0 (that is, each slice is a row). // // In this case, we choose the CUDA kernel that processes the data in // "index-major order". For example, if thread count equals slice size, then // all threads process slice #0 in lockstep, and then slice #1, and so on. // // - Otherwise (i.e., sliceStride has the lowest value), this function returns // false. The simplest example is a 2-D contiguous tensor with sliceDim == 1 // (each slice is a column). // // In this case, we choose the CUDA kernel that processes the data in // "elementInSlice-major order". For example, each thread can process element // #0 of every slice, and then element #1 of every slice, and so on. bool THCTensor_(indexShouldBeMajor)(TensorInfo<scalar_t, unsigned int> &info, int sliceDim) { // The stride between adjacent slices (e.g., between element #0 of slice #100 // and element #0 of slice #101). 
unsigned int sliceStride = info.strides[sliceDim]; for (int i = 0; i < info.dims; ++i) { if (i != sliceDim && info.sizes[i] > 1 && info.strides[i] < sliceStride) { return true; } } return false; } void THCTensor_(indexCopy)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, src); ptrdiff_t srcTotalSize = THCTensor_(nElement)(state, src); int64_t dstCopyDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = c10::cuda::getCurrentCUDAStream(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexCopySmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, sliceSize, dstCopyDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexCopyLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstCopyDim, srcCopyDim, srcTotalSize, \ (IDX_IS_MAJOR) ? 
sliceSize : numIndices, \ dstCopyDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(srcTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(srcTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstCopyDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstCopyDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstCopyDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcCopyDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcCopyDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(take)(THCState *state, THCTensor *dst, THCTensor *src, THCudaLongTensor *index) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(!(THCTensor_(numel)(state, src) == 0 && THCudaLongTensor_numel(state, index) != 0), 2, "tried to take from an empty tensor"); THCTensor_(resizeNd)(state, dst, index->dim(), THTensor_getSizePtr(index), NULL); 
dispatchTakePut<scalar_t, TensorTakeOp>(state, src, dst, index); } static void THCTensor_(sort_indices)(THCState *state, THCudaLongTensor *index, THCTensor *src) { THCThrustAllocator thrustAlloc(state); auto index_iter = thrust::device_ptr<int64_t>(THCudaLongTensor_data(state, index)); auto src_iter = thrust::device_ptr<scalar_t>(THCTensor_(data)(state, src)); auto numel = THCTensor_(numel)(state, src); thrust::sort_by_key( thrust::cuda::par(thrustAlloc).on(c10::cuda::getCurrentCUDAStream()), index_iter, index_iter + numel, src_iter, ThrustLTOp<int64_t>()); } void THCTensor_(put)(THCState *state, THCTensor *dst, THCudaLongTensor *index, THCTensor *src, int accumulate) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, dst, src)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, index)); ptrdiff_t dstSize = THCTensor_(nElement)(state, dst); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, index); THArgCheck(THCTensor_(nElement)(state, src) == numIndices, 3, "src should have the same number of elements as index"); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, dst) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCTensor_(nDimensionLegacyNoScalars)(state, src) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, index) <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); if (numIndices == 0) { return; } if (accumulate) { // wrap indices so to replace negative indices THCudaLongTensor* sorted_index = THCudaLongTensor_new(state); THCudaLongTensor_resizeAs(state, sorted_index, index); THC_pointwiseApply2<int64_t, int64_t>(state, sorted_index, index, WrapIndexOp(dstSize)); THCTensor* sorted_src = THCTensor_(newClone)(state, src); THCTensor_(sort_indices)(state, sorted_index, sorted_src); dispatchTakePut<scalar_t, TensorPutAccumulateOp>(state, dst, sorted_src, sorted_index); THCTensor_(free)(state, sorted_src); THCudaLongTensor_free(state, sorted_index); } else { dispatchTakePut<scalar_t, TensorPutOp>(state, dst, src, index); } } void THCTensor_(indexFill)(THCState *state, THCTensor *dst, int dim, THCudaLongTensor *indices, scalar_t val) { at::NoNamesGuard guard; THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, dst)); THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, indices)); dim = at::maybe_wrap_dim(dim, dst); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 4, CUTORCH_DIM_WARNING); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. 
ptrdiff_t sliceSize = THCTensor_(getSliceSize)(state, dst, dim, indices, nullptr); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); int64_t dstFillDimSize = THCTensor_(sizeLegacyNoScalars)(state, dst, dim); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); if (sliceSize == 0) { return; } cudaStream_t stream = c10::cuda::getCurrentCUDAStream(); int indContig = THCudaLongTensor_isContiguous(state, indices); int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM) \ indexFillSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize, dstFillDimSize, val); #define LARGE_INDEX(TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexFillLargeIndex<TENSOR_TYPE, TYPE, DST_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, indicesInfo, \ dstFillDim, sliceSize * numIndices, \ (IDX_IS_MAJOR) ? sliceSize : numIndices, \ dstFillDimSize, val); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, -2); } else if (dstInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, -2); } else if (dstInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstFillDim); if (dstInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, -2, true); } else if (dstInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, -2, false); } } else if (dstInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstFillDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstFillDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX } void THCTensor_(indexSelect)(THCState *state, THCTensor *dst, THCTensor *src, int dim, THCudaLongTensor *indices) { #if defined(THC_REAL_IS_BFLOAT16) && !defined(__HIP_PLATFORM_HCC__) TORCH_CHECK(false, "indexSelect not suppported with BFloat16"); #else THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, dst, src, 
indices)); dim = at::maybe_wrap_dim(dim, src); int dims = THCTensor_(nDimensionLegacyNoScalars)(state, dst); THArgCheck(dims <= MAX_CUTORCH_DIMS, 2, CUTORCH_DIM_WARNING); dims = THCTensor_(nDimensionLegacyNoScalars)(state, src); THArgCheck(dims <= MAX_CUTORCH_DIMS, 3, CUTORCH_DIM_WARNING); dims = THCudaLongTensor_nDimensionLegacyNoScalars(state, indices); THArgCheck(dims <= MAX_CUTORCH_DIMS, 5, CUTORCH_DIM_WARNING); ptrdiff_t numIndices = THCudaLongTensor_nElement(state, indices); int srcDims = THCTensor_(nDimensionLegacyNoScalars)(state, src); cudaStream_t stream = c10::cuda::getCurrentCUDAStream(); THArgCheck(THCudaLongTensor_nDimensionLegacyNoScalars(state, indices) <= 1, 3, "Index is supposed to be an empty tensor or a vector"); THArgCheck(dim < srcDims, 4, "Indexing dim is out of bounds"); THArgCheck(srcDims > 0, 2, "Source tensor is empty"); std::vector<int64_t> newSize = src->sizes().vec(); if (src->dim() > 0) { newSize[dim] = numIndices; } THCTensor_(resize)(state, dst, newSize, {}); ptrdiff_t dstTotalSize = THCTensor_(nElement)(state, dst); if (dstTotalSize == 0) { return; } int indContig = THCudaLongTensor_isContiguous(state, indices); // The `src` is partitioned into two parts: // -the size of each slice we are indexing, which is the // total size of the tensor ignoring dimension `dim`; // -the number of indices we are choosing, which is the total size // of the tensor `indices`. int64_t srcSelectDimSize = THCTensor_(sizeLegacyNoScalars)(state, src, dim); ptrdiff_t sliceSize = dstTotalSize / numIndices; int mpc = at::cuda::getCurrentDeviceProperties()->multiProcessorCount; #define SMALL_INDEX(TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM) \ indexSelectSmallIndex<TENSOR_TYPE, TYPE, DST_DIM, SRC_DIM, IDX_DIM> \ <<<smallIndexGrid, smallIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, static_cast<TYPE>(sliceSize), \ srcSelectDimSize); #define LARGE_INDEX(TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR) \ indexSelectLargeIndex<TENSOR_TYPE, TYPE, \ DST_DIM, SRC_DIM, IDX_DIM, IDX_IS_MAJOR> \ <<<largeIndexGrid, largeIndexBlock, 0, stream>>>( \ dstInfo, srcInfo, indicesInfo, \ dstSelectDim, srcSelectDim, static_cast<TYPE>(dstTotalSize), \ static_cast<TYPE>((IDX_IS_MAJOR) ? 
sliceSize : numIndices), \ srcSelectDimSize); dim3 smallIndexGrid(std::min(THCCeilDiv(sliceSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 smallIndexBlock(std::min(sliceSize, (ptrdiff_t)128)); dim3 largeIndexGrid(std::min(THCCeilDiv(dstTotalSize, (ptrdiff_t)128), (ptrdiff_t)(mpc * 8))); dim3 largeIndexBlock(std::min(dstTotalSize, (ptrdiff_t)128)); if (THCTensor_canUse32BitIndexMath(state, dst) && THCTensor_canUse32BitIndexMath(state, src) && THCTensor_canUse32BitIndexMath(state, indices)) { TensorInfo<scalar_t, unsigned int> dstInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, unsigned int> srcInfo = getTensorInfo<scalar_t, THCTensor, unsigned int>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, unsigned int> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, unsigned int>(state, indices); indicesInfo.collapseDims(); // A reasonable choice for when to have each thread iterate over // indices to choose if (numIndices <= 16) { if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 1, 1, -2); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 2, 2, -2); } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { SMALL_INDEX(scalar_t, unsigned int, 3, 3, -2); } else { SMALL_INDEX(scalar_t, unsigned int, -1, -1, -1); } } else { bool indexIsMajor = THCTensor_(indexShouldBeMajor)(dstInfo, dstSelectDim); if (dstInfo.dims == 1 && srcInfo.dims == 1 && indContig) { LARGE_INDEX(scalar_t, unsigned int, 1, 1, -2, true); } else if (dstInfo.dims == 2 && srcInfo.dims == 2 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 2, 2, -2, false); } } else if (dstInfo.dims == 3 && srcInfo.dims == 3 && indContig) { if (indexIsMajor) { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, true); } else { LARGE_INDEX(scalar_t, unsigned int, 3, 3, -2, false); } } else { LARGE_INDEX(scalar_t, unsigned int, -1, -1, -1, true); } } } else { TensorInfo<scalar_t, uint64_t> dstInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, dst); int dstSelectDim = dstInfo.collapseDims(dim); dstInfo.reduceDim(dstSelectDim); TensorInfo<scalar_t, uint64_t> srcInfo = getTensorInfo<scalar_t, THCTensor, uint64_t>(state, src); int srcSelectDim = srcInfo.collapseDims(dim); srcInfo.reduceDim(srcSelectDim); TensorInfo<int64_t, uint64_t> indicesInfo = getTensorInfo<int64_t, THCudaLongTensor, uint64_t>(state, indices); indicesInfo.collapseDims(); LARGE_INDEX(scalar_t, uint64_t, -1, -1, -1, true); } #undef SMALL_INDEX #undef LARGE_INDEX #endif // THC_REAL_IS_BFLOAT16 && !__HIP_PLATFORM_HCC__ } #endif
eb2f8f4f74fe73a54f5622566b59f905336279fc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "stdafx.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

using namespace std;

////////////////////////////////////////////////////////////////////////////////
// GPU routines
////////////////////////////////////////////////////////////////////////////////

// Average each RGB pixel to a gray value and atomically increment its histogram bin.
__global__ void GrayScale(int *g_outdata, unsigned char *g_indata, int bin_width, int num_pixels)
{
    int pixel = threadIdx.x + blockDim.x*blockIdx.x;
    if (pixel >= num_pixels) return;    // guard threads of the last (partial) block
    int tid = pixel*3;
    unsigned char gray = (g_indata[tid] + g_indata[tid+1] + g_indata[tid + 2])/3;
    atomicAdd(&g_outdata[gray / bin_width], 1);
    //g_outdata[tid] = gray;
    //g_outdata[tid+1] = gray;
    //g_outdata[tid+2] = gray;
}

int main(int argc, const char **argv)
{
    int num_elements, num_threads, mem_size, num_blocks, bin_count, bin_width;
    int width, height, channels;
    char filename[20];
    char picture[100];
    //bool f;
    unsigned char *din_image;
    unsigned char *dout_image;
    int *out_hist;
    int *histogram;

    cout << "Enter image location:" << endl;
    cin >> picture;

    // User enters the number of histogram bins...
    cout << "Enter bins count:" << endl;
    cin >> bin_count;
    bin_width = 255 / bin_count;
    if (255 % bin_count != 0) bin_width += 1;    // round the bin width up

    unsigned char *image = stbi_load(picture, &width, &height, &channels, 3);
    if (!image)
        cout << "Unsuccessful loading!" << endl;
    else
        cout << "Image successfully loaded" << endl;

    findCudaDevice(argc, argv);

    histogram = (int*) malloc(bin_count * sizeof(int));
    mem_size = sizeof(char) * (width*height*3);

    checkCudaErrors(hipMalloc((void**)&din_image, mem_size));
    checkCudaErrors(hipMalloc((void**)&out_hist, bin_count * sizeof(int)));
    checkCudaErrors(hipMemset(out_hist, 0, bin_count * sizeof(int)));    // bins must start at zero
    //checkCudaErrors(hipMalloc((void**)&dout_image, bin_count * sizeof(int)));
    checkCudaErrors(hipMemcpy(din_image, image, mem_size, hipMemcpyHostToDevice));

    num_elements = width*height;
    num_threads = 1024;
    num_blocks = (num_elements + num_threads - 1) / num_threads;    // cover the last partial block

    GrayScale<<<num_blocks, num_threads>>>(out_hist, din_image, bin_width, num_elements);
    getLastCudaError("GrayScale kernel execution failed");

    // copy result from device to host
    //checkCudaErrors(hipMemcpy(image, dout_image, mem_size, hipMemcpyDeviceToHost));
    checkCudaErrors(hipMemcpy(histogram, out_hist, bin_count * sizeof(int), hipMemcpyDeviceToHost));

    cout << "Enter new filename" << endl;
    cin >> filename;
    //stbi_write_jpg(filename, width, height, channels, image, 100);

    for (int i = 0; i < bin_count; i++) {
        printf("Bin %d - %d\n", i, histogram[i]);
    }

    free(image);
    free(histogram);
    checkCudaErrors(hipFree(din_image));
    checkCudaErrors(hipFree(out_hist));
    //checkCudaErrors(hipFree(dout_image));
    hipDeviceReset();
    return 0;
}
eb2f8f4f74fe73a54f5622566b59f905336279fc.cu
#include <string.h>
#include <math.h>
#include <float.h>
#include "helper_cuda.h"
#include "stdafx.h"
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include "stb_image_write.h"
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

using namespace std;

////////////////////////////////////////////////////////////////////////////////
// GPU routines
////////////////////////////////////////////////////////////////////////////////

// Average each RGB pixel to a gray value and atomically increment its histogram bin.
__global__ void GrayScale(int *g_outdata, unsigned char *g_indata, int bin_width, int num_pixels)
{
    int pixel = threadIdx.x + blockDim.x*blockIdx.x;
    if (pixel >= num_pixels) return;    // guard threads of the last (partial) block
    int tid = pixel*3;
    unsigned char gray = (g_indata[tid] + g_indata[tid+1] + g_indata[tid + 2])/3;
    atomicAdd(&g_outdata[gray / bin_width], 1);
    //g_outdata[tid] = gray;
    //g_outdata[tid+1] = gray;
    //g_outdata[tid+2] = gray;
}

int main(int argc, const char **argv)
{
    int num_elements, num_threads, mem_size, num_blocks, bin_count, bin_width;
    int width, height, channels;
    char filename[20];
    char picture[100];
    //bool f;
    unsigned char *din_image;
    unsigned char *dout_image;
    int *out_hist;
    int *histogram;

    cout << "Enter image location:" << endl;
    cin >> picture;

    // User enters the number of histogram bins...
    cout << "Enter bins count:" << endl;
    cin >> bin_count;
    bin_width = 255 / bin_count;
    if (255 % bin_count != 0) bin_width += 1;    // round the bin width up

    unsigned char *image = stbi_load(picture, &width, &height, &channels, 3);
    if (!image)
        cout << "Unsuccessful loading!" << endl;
    else
        cout << "Image successfully loaded" << endl;

    findCudaDevice(argc, argv);

    histogram = (int*) malloc(bin_count * sizeof(int));
    mem_size = sizeof(char) * (width*height*3);

    checkCudaErrors(cudaMalloc((void**)&din_image, mem_size));
    checkCudaErrors(cudaMalloc((void**)&out_hist, bin_count * sizeof(int)));
    checkCudaErrors(cudaMemset(out_hist, 0, bin_count * sizeof(int)));    // bins must start at zero
    //checkCudaErrors(cudaMalloc((void**)&dout_image, bin_count * sizeof(int)));
    checkCudaErrors(cudaMemcpy(din_image, image, mem_size, cudaMemcpyHostToDevice));

    num_elements = width*height;
    num_threads = 1024;
    num_blocks = (num_elements + num_threads - 1) / num_threads;    // cover the last partial block

    GrayScale<<<num_blocks, num_threads>>>(out_hist, din_image, bin_width, num_elements);
    getLastCudaError("GrayScale kernel execution failed");

    // copy result from device to host
    //checkCudaErrors(cudaMemcpy(image, dout_image, mem_size, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(histogram, out_hist, bin_count * sizeof(int), cudaMemcpyDeviceToHost));

    cout << "Enter new filename" << endl;
    cin >> filename;
    //stbi_write_jpg(filename, width, height, channels, image, 100);

    for (int i = 0; i < bin_count; i++) {
        printf("Bin %d - %d\n", i, histogram[i]);
    }

    free(image);
    free(histogram);
    checkCudaErrors(cudaFree(din_image));
    checkCudaErrors(cudaFree(out_hist));
    //checkCudaErrors(cudaFree(dout_image));
    cudaDeviceReset();
    return 0;
}
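// A small host-side sanity check one might append to the program above, right
// after the histogram has been copied back: every pixel falls into exactly one
// bin, so the bin counts should add up to width*height. The helper name is
// illustrative; the arguments are the variables used in main() above.
#include <stdio.h>

static void check_histogram(const int *histogram, int bin_count, int width, int height)
{
    long long total = 0;
    for (int i = 0; i < bin_count; i++)
        total += histogram[i];                          // accumulate every bin
    long long expected = (long long)width * (long long)height;
    if (total == expected)
        printf("Histogram OK: %lld pixels counted\n", total);
    else
        printf("Histogram mismatch: counted %lld, expected %lld\n", total, expected);
}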
f9899f68d19150809de4a43e9d9671b996a623a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void vector_add(int *a, int *b, int *c){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    c[index] = a[index] + b[index];
}

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

int main(){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof( int );

    // allocate device memory
    hipMalloc((void **) &d_a, size);
    hipMalloc((void **) &d_b, size);
    hipMalloc((void **) &d_c, size);

    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    for (int i=0; i<N; i++){
        a[i] = b[i] = i;
        c[i] = 0;
    }

    // copy the data from host memory to the GPU's memory
    hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);

    // N is divisible by THREADS_PER_BLOCK, so this grid covers every element
    hipLaunchKernelGGL(vector_add, dim3(N / THREADS_PER_BLOCK), dim3(THREADS_PER_BLOCK), 0, 0, d_a, d_b, d_c);

    // copy the results computed on the GPU back to host memory
    hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);

    printf("c[0] = %d\n", c[0]);
    printf("c[%d] = %d\n", N-1, c[N-1]);

    // free memory
    free(a); free(b); free(c);
    hipFree( d_a );
    hipFree( d_b );
    hipFree( d_c );

    return 0;
}
f9899f68d19150809de4a43e9d9671b996a623a4.cu
#include <stdio.h>

__global__ void vector_add(int *a, int *b, int *c){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    c[index] = a[index] + b[index];
}

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

int main(){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof( int );

    // allocate device memory
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);

    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    for (int i=0; i<N; i++){
        a[i] = b[i] = i;
        c[i] = 0;
    }

    // copy the data from host memory to the GPU's memory
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // N is divisible by THREADS_PER_BLOCK, so this grid covers every element
    vector_add<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

    // copy the results computed on the GPU back to host memory
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("c[0] = %d\n", c[0]);
    printf("c[%d] = %d\n", N-1, c[N-1]);

    // free memory
    free(a); free(b); free(c);
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );

    return 0;
}
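// A minimal verification sketch for the program above: since a[i] = b[i] = i,
// every element of c should come back as 2*i. The helper name is illustrative.
#include <stdio.h>

static int verify_vector_add(const int *c, int n)
{
    for (int i = 0; i < n; i++) {
        if (c[i] != 2 * i) {                            // expected a[i] + b[i]
            printf("Mismatch at %d: got %d, expected %d\n", i, c[i], 2 * i);
            return 0;
        }
    }
    return 1;                                           // every element is correct
}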
96a21d55a2fea613e9bc26b70c9d85b0d8770e03.hip
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string>
#include <cmath>
#include <ctime>
#include <cstring>
#include <stdlib.h>
#include <stdio.h>
#include <random>
#include <chrono>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

using namespace std;

#define N 256

// forward declaration: BMwp calls getrand, which is defined further down
__device__ float getrand(hiprandState_t *state);

__global__ void BMwp(float f_new[], float dw[], const float dt, hiprandState_t *state)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    dw[tid] = dt*getrand(&state[tid]);
    // f_new[tid] is written by another thread; the update is kept as in the
    // original formulation, with a bound check so the last thread stays in range
    if (tid + 1 < N)
        f_new[tid+1] = f_new[tid] + dw[tid];
}

__global__ void initialize(float f[], float x[])
{
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    x[tid] = 0.0f;
    f[tid] = 0.0f;
}

__global__ void init_r(hiprandState_t *state, unsigned long seed)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed, idx, 0, &state[idx]);    // one subsequence per thread
}

__device__ float getrand(hiprandState_t *state)
{
    return (float)(hiprand_normal(state));
}

// the length parameter is lower case so it is not replaced by the N macro above
void io_fun(std::string file, float *x, const int n)
{
    std::ofstream myfile_tsN;
    myfile_tsN.open(file);
    for(int i = 0; i < n; i++)
    {
        myfile_tsN << x[i] << std::endl;
    }
    myfile_tsN.close();
}

int main()
{
    const int T = 1;
    const float dt = sqrt(T/float(N));
    size_t sz = N*sizeof(float);

    float *f, *dw;
    hiprandState_t *devstate;    // RNG state, one per thread, lives on the device
    f = new float[N];
    dw = new float[N];

    float *d_f, *d_dw;
    hipMalloc(&d_f, sz);
    hipMalloc(&d_dw, sz);
    hipMalloc(&devstate, N * sizeof(hiprandState_t));

    dim3 dimBlock(16,1,1);
    dim3 dimGrid(N/dimBlock.x,1,1);

    hipLaunchKernelGGL(initialize, dim3(dimGrid), dim3(dimBlock), 0, 0, d_f, d_dw);
    hipDeviceSynchronize();

    hipLaunchKernelGGL(init_r, dim3(dimGrid), dim3(dimBlock), 0, 0, devstate, 0);
    hipDeviceSynchronize();

    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    hipLaunchKernelGGL(BMwp, dim3(dimGrid), dim3(dimBlock), 0, 0, d_f, d_dw, dt, devstate);
    hipDeviceSynchronize();
    std::chrono::steady_clock::time_point end = chrono::steady_clock::now();

    cout << "Parallel Wiener Process time in microseconds: "
         << chrono::duration_cast<chrono::microseconds>(end - start).count()
         << " microseconds" << endl;

    hipMemcpy(f, d_f, sz, hipMemcpyDeviceToHost);
    hipMemcpy(dw, d_dw, sz, hipMemcpyDeviceToHost);

    std::string f3;
    f3 = "PARwp.dat";
    io_fun(f3, f, N);

    delete[] f;
    delete[] dw;
    hipFree(d_f);
    hipFree(devstate);
    hipFree(d_dw);
}
96a21d55a2fea613e9bc26b70c9d85b0d8770e03.cu
#include <iostream>
#include <fstream>
#include <string>
#include <cmath>
#include <ctime>
#include <cstring>
#include <stdlib.h>
#include <stdio.h>
#include <random>
#include <chrono>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

using namespace std;

#define N 256

// forward declaration: BMwp calls getrand, which is defined further down
__device__ float getrand(curandState *state);

__global__ void BMwp(float f_new[], float dw[], const float dt, curandState *state)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    dw[tid] = dt*getrand(&state[tid]);
    // f_new[tid] is written by another thread; the update is kept as in the
    // original formulation, with a bound check so the last thread stays in range
    if (tid + 1 < N)
        f_new[tid+1] = f_new[tid] + dw[tid];
}

__global__ void initialize(float f[], float x[])
{
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    x[tid] = 0.0f;
    f[tid] = 0.0f;
}

__global__ void init_r(curandState *state, unsigned long seed)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, idx, 0, &state[idx]);    // one subsequence per thread
}

__device__ float getrand(curandState *state)
{
    return (float)(curand_normal(state));
}

// the length parameter is lower case so it is not replaced by the N macro above
void io_fun(std::string file, float *x, const int n)
{
    std::ofstream myfile_tsN;
    myfile_tsN.open(file);
    for(int i = 0; i < n; i++)
    {
        myfile_tsN << x[i] << std::endl;
    }
    myfile_tsN.close();
}

int main()
{
    const int T = 1;
    const float dt = sqrt(T/float(N));
    size_t sz = N*sizeof(float);

    float *f, *dw;
    curandState *devstate;    // RNG state, one per thread, lives on the device
    f = new float[N];
    dw = new float[N];

    float *d_f, *d_dw;
    cudaMalloc(&d_f, sz);
    cudaMalloc(&d_dw, sz);
    cudaMalloc(&devstate, N * sizeof(curandState));

    dim3 dimBlock(16,1,1);
    dim3 dimGrid(N/dimBlock.x,1,1);

    initialize<<<dimGrid, dimBlock>>>(d_f, d_dw);
    cudaDeviceSynchronize();

    init_r<<<dimGrid, dimBlock>>>(devstate, 0);
    cudaDeviceSynchronize();

    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    BMwp<<<dimGrid, dimBlock>>>(d_f, d_dw, dt, devstate);
    cudaDeviceSynchronize();
    std::chrono::steady_clock::time_point end = chrono::steady_clock::now();

    cout << "Parallel Wiener Process time in microseconds: "
         << chrono::duration_cast<chrono::microseconds>(end - start).count()
         << " microseconds" << endl;

    cudaMemcpy(f, d_f, sz, cudaMemcpyDeviceToHost);
    cudaMemcpy(dw, d_dw, sz, cudaMemcpyDeviceToHost);

    std::string f3;
    f3 = "PARwp.dat";
    io_fun(f3, f, N);

    delete[] f;
    delete[] dw;
    cudaFree(d_f);
    cudaFree(devstate);
    cudaFree(d_dw);
}
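// In BMwp above, f_new[tid+1] depends on f_new[tid] written by a different
// thread, so the path it produces is not well defined. A race-free sketch of
// the same idea: keep the kernel that fills the increments dw, then build the
// path with a prefix sum. Buffer names (d_dw, d_f) and the length are the ones
// used above; the helper name is illustrative.
#include <thrust/scan.h>
#include <thrust/execution_policy.h>

static void build_wiener_path(const float *d_dw, float *d_f, int n)
{
    // Inclusive scan: f[k] = dw[0] + ... + dw[k].
    // (thrust::exclusive_scan would instead keep f[0] = 0, as the kernel intends.)
    thrust::inclusive_scan(thrust::device, d_dw, d_dw + n, d_f);
}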
adb2d9104163950ac72f1bab86d87ff4b0ae5c00.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <cusolverSp.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; int main() { cusolverSpHandle_t handle; cusolverStatus_t status; hipsparseStatus_t status2; int n = 500000; // create handle status = cusolverSpCreate(&handle); if(status != CUSOLVER_STATUS_SUCCESS) { cerr<<"failed to create cusolver handle\n"; return -1; } else { cerr<<"succeeded to create cusolverhandle\n"; } // create matrix descriptor hipsparseMatDescr_t descr; status2 = hipsparseCreateMatDescr(&descr); if(status2 != HIPSPARSE_STATUS_SUCCESS) { cerr<<"failed to create matrix descriptor\n"; return -1; } else { cerr<<"succeeded to create matrix descriptor\n"; } // allocate A and b on host int nnz = 3*n-2; thrust::host_vector<double> h_csrValA(nnz,0); thrust::host_vector<int> h_csrRowPtrA(n+1,0); thrust::host_vector<int> h_csrColIndA(nnz,0); thrust::host_vector<double> h_b(n,0); thrust::host_vector<double> h_x(n,0); h_csrValA[0] = 4; h_csrValA[1] = -1; h_csrRowPtrA[0] = 0; h_csrRowPtrA[1] = 2; h_csrColIndA[0] = 0; h_csrColIndA[1] = 1; h_b[0] = 3; for(int i=1; i<n-1; i++) { h_csrRowPtrA[i] = 2+3*i-3; h_csrValA[2+3*i-3] = -1; h_csrValA[2+3*i-2] = 4; h_csrValA[2+3*i-1] = -1; h_csrColIndA[2+3*i-3] = i-1; h_csrColIndA[2+3*i-2] = i; h_csrColIndA[2+3*i-1] = i+1; h_b[i] = 2; } h_csrValA[nnz-2] = -1; h_csrValA[nnz-1] = 4; h_csrColIndA[nnz-2] = n-2; h_csrColIndA[nnz-1] = n-1; h_csrRowPtrA[n-1] = nnz-2; h_csrRowPtrA[n] = nnz; h_b[n-1] = 3; // allocate A and b on device thrust::device_vector<double> d_csrValA(h_csrValA); thrust::device_vector<int> d_csrRowPtrA(h_csrRowPtrA); thrust::device_vector<int> d_csrColIndA(h_csrColIndA); thrust::device_vector<double> d_b(h_b); thrust::device_vector<double> d_x(h_x); // raw pointers double* ptrValA = thrust::raw_pointer_cast(&d_csrValA[0]); int* ptrRowA = thrust::raw_pointer_cast(&d_csrRowPtrA[0]); int* ptrColA = thrust::raw_pointer_cast(&d_csrColIndA[0]); double* ptrb = thrust::raw_pointer_cast(&d_b[0]); double* ptrx = thrust::raw_pointer_cast(&d_x[0]); // solve cout<<"start solving...\n"; double tol = 1e-16; int reorder = 1; int singularity = 0; status = cusolverSpDcsrlsvqr(handle,n,nnz,descr,ptrValA,ptrRowA,ptrColA,ptrb,tol, reorder,ptrx,&singularity); // status = cusolverSpDcsrlsvqrHost(handle,n,nnz,descr,h_csrValA,h_csrRowPtrA,h_csrColIndA,h_b,tol, // reorder,h_x,&singularity); cout<<"end solving...\n"; thrust::copy(d_x.begin(), d_x.end(), h_x.begin()); cout<<"singularity = "<<singularity<<"\n"; if(status != CUSOLVER_STATUS_SUCCESS) { cerr<<"failed to solve\n"; return -1; } else { cerr<<"succeeded to solve\n"; } cout<<"x[0] = "<<h_x[0]<<"\n"; cout<<"x[n-1] = "<<h_x[n-1]<<"\n"; // clear cusolverSpDestroy(handle); return 0; }
adb2d9104163950ac72f1bab86d87ff4b0ae5c00.cu
#include <iostream> #include <cuda.h> #include <cusolverSp.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> using namespace std; int main() { cusolverSpHandle_t handle; cusolverStatus_t status; cusparseStatus_t status2; int n = 500000; // create handle status = cusolverSpCreate(&handle); if(status != CUSOLVER_STATUS_SUCCESS) { cerr<<"failed to create cusolver handle\n"; return -1; } else { cerr<<"succeeded to create cusolverhandle\n"; } // create matrix descriptor cusparseMatDescr_t descr; status2 = cusparseCreateMatDescr(&descr); if(status2 != CUSPARSE_STATUS_SUCCESS) { cerr<<"failed to create matrix descriptor\n"; return -1; } else { cerr<<"succeeded to create matrix descriptor\n"; } // allocate A and b on host int nnz = 3*n-2; thrust::host_vector<double> h_csrValA(nnz,0); thrust::host_vector<int> h_csrRowPtrA(n+1,0); thrust::host_vector<int> h_csrColIndA(nnz,0); thrust::host_vector<double> h_b(n,0); thrust::host_vector<double> h_x(n,0); h_csrValA[0] = 4; h_csrValA[1] = -1; h_csrRowPtrA[0] = 0; h_csrRowPtrA[1] = 2; h_csrColIndA[0] = 0; h_csrColIndA[1] = 1; h_b[0] = 3; for(int i=1; i<n-1; i++) { h_csrRowPtrA[i] = 2+3*i-3; h_csrValA[2+3*i-3] = -1; h_csrValA[2+3*i-2] = 4; h_csrValA[2+3*i-1] = -1; h_csrColIndA[2+3*i-3] = i-1; h_csrColIndA[2+3*i-2] = i; h_csrColIndA[2+3*i-1] = i+1; h_b[i] = 2; } h_csrValA[nnz-2] = -1; h_csrValA[nnz-1] = 4; h_csrColIndA[nnz-2] = n-2; h_csrColIndA[nnz-1] = n-1; h_csrRowPtrA[n-1] = nnz-2; h_csrRowPtrA[n] = nnz; h_b[n-1] = 3; // allocate A and b on device thrust::device_vector<double> d_csrValA(h_csrValA); thrust::device_vector<int> d_csrRowPtrA(h_csrRowPtrA); thrust::device_vector<int> d_csrColIndA(h_csrColIndA); thrust::device_vector<double> d_b(h_b); thrust::device_vector<double> d_x(h_x); // raw pointers double* ptrValA = thrust::raw_pointer_cast(&d_csrValA[0]); int* ptrRowA = thrust::raw_pointer_cast(&d_csrRowPtrA[0]); int* ptrColA = thrust::raw_pointer_cast(&d_csrColIndA[0]); double* ptrb = thrust::raw_pointer_cast(&d_b[0]); double* ptrx = thrust::raw_pointer_cast(&d_x[0]); // solve cout<<"start solving...\n"; double tol = 1e-16; int reorder = 1; int singularity = 0; status = cusolverSpDcsrlsvqr(handle,n,nnz,descr,ptrValA,ptrRowA,ptrColA,ptrb,tol, reorder,ptrx,&singularity); // status = cusolverSpDcsrlsvqrHost(handle,n,nnz,descr,h_csrValA,h_csrRowPtrA,h_csrColIndA,h_b,tol, // reorder,h_x,&singularity); cout<<"end solving...\n"; thrust::copy(d_x.begin(), d_x.end(), h_x.begin()); cout<<"singularity = "<<singularity<<"\n"; if(status != CUSOLVER_STATUS_SUCCESS) { cerr<<"failed to solve\n"; return -1; } else { cerr<<"succeeded to solve\n"; } cout<<"x[0] = "<<h_x[0]<<"\n"; cout<<"x[n-1] = "<<h_x[n-1]<<"\n"; // clear cusolverSpDestroy(handle); return 0; }
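// A small host-side check one might add after copying x back: evaluate the
// residual ||A*x - b||_inf straight from the CSR arrays already held in the
// host vectors above (h_csrValA, h_csrRowPtrA, h_csrColIndA, h_b, h_x).
// The helper name is illustrative.
#include <thrust/host_vector.h>
#include <algorithm>
#include <cmath>

static double csr_residual_inf(const thrust::host_vector<double> &val,
                               const thrust::host_vector<int> &rowPtr,
                               const thrust::host_vector<int> &colInd,
                               const thrust::host_vector<double> &b,
                               const thrust::host_vector<double> &x,
                               int n)
{
    double worst = 0.0;
    for (int row = 0; row < n; ++row) {
        double acc = 0.0;
        for (int k = rowPtr[row]; k < rowPtr[row + 1]; ++k)
            acc += val[k] * x[colInd[k]];               // one row of A*x
        worst = std::max(worst, std::abs(acc - b[row]));
    }
    return worst;                                       // small (near machine precision) if the solve succeeded
}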
f479977fc5445772626660f89151e01e4c0015da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @file compare.cu * @brief element wise product * @author HIKARU KONDO * @date 2021/08/24 */ #include "element_wise_operator.cuh" #define BLOCKDIM 256 template<typename T> __global__ void element_wise_product(T *arrayA, T *arrayB, T *resArray, int size) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) { return ; } resArray[idx] = arrayA[idx] * arrayB[idx]; } template<typename T> __global__ void element_wise_devide(T *arrayA, T *arrayB, T *resArray, int size) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) { return ; } resArray[idx] = arrayA[idx] / arrayB[idx]; } void float_element_wise_product(float *arrayA, float *arrayB, float *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( element_wise_product), dim3(gridDim), dim3(blockDim), 0, 0, arrayA, arrayB, resArray, size); } void float_element_wise_devide(float *arrayA, float *arrayB, float *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( element_wise_devide), dim3(gridDim), dim3(blockDim) , 0, 0, arrayA, arrayB, resArray, size); } void double_element_wise_product(double *arrayA, double *arrayB, double *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( element_wise_product), dim3(gridDim), dim3(blockDim), 0, 0, arrayA, arrayB, resArray, size); } void double_element_wise_devide(double *arrayA, double *arrayB, double *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( element_wise_devide), dim3(gridDim), dim3(blockDim) , 0, 0, arrayA, arrayB, resArray, size); }
f479977fc5445772626660f89151e01e4c0015da.cu
/** * @file compare.cu * @brief element wise product * @author HIKARU KONDO * @date 2021/08/24 */ #include "element_wise_operator.cuh" #define BLOCKDIM 256 template<typename T> __global__ void element_wise_product(T *arrayA, T *arrayB, T *resArray, int size) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) { return ; } resArray[idx] = arrayA[idx] * arrayB[idx]; } template<typename T> __global__ void element_wise_devide(T *arrayA, T *arrayB, T *resArray, int size) { unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx >= size) { return ; } resArray[idx] = arrayA[idx] / arrayB[idx]; } void float_element_wise_product(float *arrayA, float *arrayB, float *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); element_wise_product<<< gridDim, blockDim>>> (arrayA, arrayB, resArray, size); } void float_element_wise_devide(float *arrayA, float *arrayB, float *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); element_wise_devide<<< gridDim, blockDim >>> (arrayA, arrayB, resArray, size); } void double_element_wise_product(double *arrayA, double *arrayB, double *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); element_wise_product<<< gridDim, blockDim>>> (arrayA, arrayB, resArray, size); } void double_element_wise_devide(double *arrayA, double *arrayB, double *resArray, int size) { dim3 blockDim(BLOCKDIM); dim3 gridDim((size + blockDim.x - 1) / blockDim.x); element_wise_devide<<< gridDim, blockDim >>> (arrayA, arrayB, resArray, size); }
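// A minimal usage sketch for the wrappers above (assuming element_wise_operator.cuh
// declares them): multiply two length-n device arrays into a third. Host pointers
// and the demo function name are illustrative.
#include <cuda_runtime.h>

static void element_wise_product_demo(const float *h_a, const float *h_b, float *h_out, int n)
{
    float *d_a, *d_b, *d_out;
    size_t bytes = n * sizeof(float);
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    float_element_wise_product(d_a, d_b, d_out, n);     // launches element_wise_product<float>
    cudaDeviceSynchronize();                            // wait before reading the result back

    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_out);
}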
b522e6208012542830c5edfa47cc4fd09d56583f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <limits> #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/log_softmax_op.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { #define LAUNCH_WARP_FORWAR_COMPUTE(near_greater_power_of_two) \ case near_greater_power_of_two: \ hipLaunchKernelGGL(( ComputeLogSoftmaxForwardInWarp< \ T, AccT, near_greater_power_of_two>), dim3(blocks), dim3(threads), 0, stream, \ dst, src, outer_size, dim_size); \ break; template <typename T, int KernelWarpSize> __device__ __forceinline__ T WarpReduceSum(T value) { #pragma unroll for (int offset = KernelWarpSize / 2; offset > 0; offset /= 2) { T sum_val = platform::CudaShuffleXorSync(0xFFFFFFFF, value, offset); value = value + sum_val; } return value; } template <typename T, int KernelWarpSize> __device__ __forceinline__ T WarpReduceMax(T value) { #pragma unroll for (int offset = KernelWarpSize / 2; offset > 0; offset /= 2) { T max_val = platform::CudaShuffleXorSync(0xFFFFFFFF, value, offset); value = max(value, max_val); } return value; } int GetNearGreaterPowerOfTwo(int value) { int log2_value = 0; while ((1 << log2_value) < value) { ++log2_value; } return 1 << log2_value; } template <typename T, typename AccT, int NearGreaterPowerOfTwo> __global__ void ComputeLogSoftmaxForwardInWarp(T *dst, const T *src, int batch_size, int element_count) { constexpr int near_greater_power_of_two = NearGreaterPowerOfTwo; constexpr int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; constexpr int warp_iter = near_greater_power_of_two / kernel_warp_size; int batch_id = blockDim.y * blockIdx.x + threadIdx.y; int thread_in_warp_idx = threadIdx.x; // 1.read data from global memory to registers AccT elements[warp_iter]; // set effective_element_count as the num of elements when warps do effective // work // set effective_element_count as 0, when warps do ineffective work int effective_element_count = (batch_id < batch_size) ? element_count : 0; for (int it = 0; it < warp_iter; ++it) { int element_index = thread_in_warp_idx + it * kernel_warp_size; if (element_index < effective_element_count) { elements[it] = static_cast<AccT>(src[batch_id * element_count + element_index]); } else { elements[it] = -std::numeric_limits<AccT>::infinity(); } } // 2.compute max_value. For each thread, loop all registers to find max AccT max_value = elements[0]; #pragma unroll for (int it = 1; it < warp_iter; ++it) { max_value = (max_value > elements[it]) ? max_value : elements[it]; } max_value = WarpReduceMax<AccT, kernel_warp_size>(max_value); // 3.For each warp, accumulate all thread registers AccT sum = 0.0f; #pragma unroll for (int it = 0; it < warp_iter; ++it) { sum += ::exp(elements[it] - max_value); } sum = WarpReduceSum<AccT, kernel_warp_size>(sum); // 4.store result. 
sum = ::log(sum); #pragma unroll for (int it = 0; it < warp_iter; ++it) { int element_index = thread_in_warp_idx + it * kernel_warp_size; if (element_index < effective_element_count) { dst[batch_id * element_count + element_index] = static_cast<T>(elements[it] - max_value - sum); } else { break; } } } template <typename T, typename AccT> void LaunchSoftmaxForwardForLastAxis(T *dst, const T *src, int dim_size, int outer_size, gpuStream_t stream) { int threads_per_block = 128; int near_greater_power_of_two = GetNearGreaterPowerOfTwo(dim_size); int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; int warps_per_block = (threads_per_block / kernel_warp_size); int blocks = (outer_size + warps_per_block - 1) / warps_per_block; dim3 threads(kernel_warp_size, warps_per_block, 1); switch (near_greater_power_of_two) { LAUNCH_WARP_FORWAR_COMPUTE(1); LAUNCH_WARP_FORWAR_COMPUTE(2); LAUNCH_WARP_FORWAR_COMPUTE(4); // dim_size: 3~4 LAUNCH_WARP_FORWAR_COMPUTE(8); // dim_size: 5~8 LAUNCH_WARP_FORWAR_COMPUTE(16); // dim_size: 9~16 LAUNCH_WARP_FORWAR_COMPUTE(32); // dim_size: 17~32 LAUNCH_WARP_FORWAR_COMPUTE(64); // dim_size: 33~64 LAUNCH_WARP_FORWAR_COMPUTE(128); // dim_size 65~128 LAUNCH_WARP_FORWAR_COMPUTE(256); // dim_size 129~256 LAUNCH_WARP_FORWAR_COMPUTE(512); // dim_size 257~512 LAUNCH_WARP_FORWAR_COMPUTE(1024); // dim_size 513~1024 default: break; } } template <typename T> class LogSoftmaxKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext &context) const override { const auto *x = context.Input<framework::Tensor>("X"); auto *out = context.Output<framework::Tensor>("Out"); const auto *input_data = x->data<T>(); auto *output_data = out->mutable_data<T>(context.GetPlace()); const int rank = x->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int dim_size = x->dims()[axis]; int inner_size = 1; for (int i = axis + 1; i < x->dims().size(); ++i) { inner_size *= x->dims()[i]; } int outer_size = SizeToAxis(axis, x->dims()); gpuStream_t stream = context.cuda_device_context().stream(); if (inner_size == 1 && dim_size <= 1024 && dim_size * sizeof(T) <= 4096) { LaunchSoftmaxForwardForLastAxis<T, MPDType>(output_data, input_data, dim_size, outer_size, stream); } else { LogSoftmaxFunctor<platform::CUDADeviceContext, T>()( context.template device_context<platform::CUDADeviceContext>(), x, out, axis); } } }; // Backward below #define LAUNCH_WARP_BACKWARD_COMPUTE(near_greater_power_of_two) \ case near_greater_power_of_two: \ hipLaunchKernelGGL(( ComputeLogSoftmaxBackwardInWarp< \ T, AccT, near_greater_power_of_two>), dim3(blocks), dim3(threads), 0, stream, \ output, grad_output, grad_input, outer_size, dim_size); \ break; template <typename T, typename AccT, int NearGreaterPowerOfTwo> __global__ void ComputeLogSoftmaxBackwardInWarp(const T *output, const T *grad_output, T *grad_input, int batch_size, int element_count) { constexpr int near_greater_power_of_two = NearGreaterPowerOfTwo; constexpr int kernel_warp_size = (near_greater_power_of_two < 32) ? 
near_greater_power_of_two : 32; constexpr int warp_iter = near_greater_power_of_two / kernel_warp_size; int batch_id = blockDim.y * blockIdx.x + threadIdx.y; int thread_in_warp_idx = threadIdx.x % kernel_warp_size; // 1.read data from global memory to registers AccT output_register[warp_iter]; AccT grad_output_register[warp_iter]; int effective_element_count = (batch_id < batch_size) ? element_count : 0; for (int iter = 0; iter < warp_iter; ++iter) { int element_index = thread_in_warp_idx + iter * kernel_warp_size; if (element_index < effective_element_count) { output_register[iter] = static_cast<AccT>(output[batch_id * element_count + element_index]); grad_output_register[iter] = static_cast<AccT>( grad_output[batch_id * element_count + element_index]); } else { output_register[iter] = AccT(0); grad_output_register[iter] = AccT(0); } } // 2. For each warp, accumulate all thread registers AccT sum = grad_output_register[0]; #pragma unroll for (int iter = 1; iter < warp_iter; ++iter) { sum += grad_output_register[iter]; } sum = WarpReduceSum<AccT, kernel_warp_size>(sum); // 3. write result in grad_input #pragma unroll for (int iter = 0; iter < warp_iter; ++iter) { int element_index = thread_in_warp_idx + iter * kernel_warp_size; if (element_index < effective_element_count) { grad_input[batch_id * element_count + element_index] = static_cast<T>( (grad_output_register[iter] - ::exp(output_register[iter]) * sum)); } } } template <typename T, typename AccT> void LaunchSoftmaxBackwardForLastAxis(T *grad_input, const T *grad_output, const T *output, int dim_size, int outer_size, gpuStream_t stream) { int threads_per_block = 128; int near_greater_power_of_two = GetNearGreaterPowerOfTwo(dim_size); int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; int warps_per_block = (threads_per_block / kernel_warp_size); int blocks = (outer_size + warps_per_block - 1) / warps_per_block; dim3 threads(kernel_warp_size, warps_per_block, 1); switch (near_greater_power_of_two) { LAUNCH_WARP_BACKWARD_COMPUTE(1); // dim_size: 1 LAUNCH_WARP_BACKWARD_COMPUTE(2); // dim_size: 2 LAUNCH_WARP_BACKWARD_COMPUTE(4); // dim_size: 3~4 LAUNCH_WARP_BACKWARD_COMPUTE(8); // dim_size: 5~8 LAUNCH_WARP_BACKWARD_COMPUTE(16); // dim_size: 9~16 LAUNCH_WARP_BACKWARD_COMPUTE(32); // dim_size: 17~32 LAUNCH_WARP_BACKWARD_COMPUTE(64); // dim_size: 33~64 LAUNCH_WARP_BACKWARD_COMPUTE(128); // dim_size: 65~128 LAUNCH_WARP_BACKWARD_COMPUTE(256); // dim_size: 129~256 LAUNCH_WARP_BACKWARD_COMPUTE(512); // dim_size: 257~512 LAUNCH_WARP_BACKWARD_COMPUTE(1024); // dim_size: 513~1024 default: break; } } template <typename T> class LogSoftmaxGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext &context) const override { const auto *out = context.Input<framework::Tensor>("Out"); const auto *g_out = context.Input<framework::Tensor>(framework::GradVarName("Out")); auto *g_x = context.Output<framework::Tensor>(framework::GradVarName("X")); const auto *out_data = out->data<T>(); const auto *g_out_data = g_out->data<T>(); auto *g_x_data = g_x->mutable_data<T>(context.GetPlace()); const int rank = out->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int dim_size = out->dims()[axis]; int inner_size = 1; for (int i = axis + 1; i < out->dims().size(); ++i) { inner_size *= out->dims()[i]; } int outer_size = SizeToAxis(axis, out->dims()); gpuStream_t stream = 
context.cuda_device_context().stream(); if (inner_size == 1 && dim_size <= 1024 && dim_size * sizeof(T) <= 4096) { LaunchSoftmaxBackwardForLastAxis<T, MPDType>( g_x_data, g_out_data, out_data, dim_size, outer_size, stream); } else { LogSoftmaxGradFunctor<platform::CUDADeviceContext, T>()( context.template device_context<platform::CUDADeviceContext>(), out, g_out, g_x, axis); } } }; } // operators } // paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( log_softmax, ops::LogSoftmaxKernel<plat::CUDADeviceContext, float>, ops::LogSoftmaxKernel<plat::CUDADeviceContext, double>, ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( log_softmax_grad, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, float>, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, double>, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::float16>);
b522e6208012542830c5edfa47cc4fd09d56583f.cu
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <limits> #include "paddle/fluid/operators/amp/fp16_type_traits.h" #include "paddle/fluid/operators/log_softmax_op.h" #include "paddle/fluid/platform/cuda_device_function.h" namespace paddle { namespace operators { #define LAUNCH_WARP_FORWAR_COMPUTE(near_greater_power_of_two) \ case near_greater_power_of_two: \ ComputeLogSoftmaxForwardInWarp< \ T, AccT, near_greater_power_of_two><<<blocks, threads, 0, stream>>>( \ dst, src, outer_size, dim_size); \ break; template <typename T, int KernelWarpSize> __device__ __forceinline__ T WarpReduceSum(T value) { #pragma unroll for (int offset = KernelWarpSize / 2; offset > 0; offset /= 2) { T sum_val = platform::CudaShuffleXorSync(0xFFFFFFFF, value, offset); value = value + sum_val; } return value; } template <typename T, int KernelWarpSize> __device__ __forceinline__ T WarpReduceMax(T value) { #pragma unroll for (int offset = KernelWarpSize / 2; offset > 0; offset /= 2) { T max_val = platform::CudaShuffleXorSync(0xFFFFFFFF, value, offset); value = max(value, max_val); } return value; } int GetNearGreaterPowerOfTwo(int value) { int log2_value = 0; while ((1 << log2_value) < value) { ++log2_value; } return 1 << log2_value; } template <typename T, typename AccT, int NearGreaterPowerOfTwo> __global__ void ComputeLogSoftmaxForwardInWarp(T *dst, const T *src, int batch_size, int element_count) { constexpr int near_greater_power_of_two = NearGreaterPowerOfTwo; constexpr int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; constexpr int warp_iter = near_greater_power_of_two / kernel_warp_size; int batch_id = blockDim.y * blockIdx.x + threadIdx.y; int thread_in_warp_idx = threadIdx.x; // 1.read data from global memory to registers AccT elements[warp_iter]; // set effective_element_count as the num of elements when warps do effective // work // set effective_element_count as 0, when warps do ineffective work int effective_element_count = (batch_id < batch_size) ? element_count : 0; for (int it = 0; it < warp_iter; ++it) { int element_index = thread_in_warp_idx + it * kernel_warp_size; if (element_index < effective_element_count) { elements[it] = static_cast<AccT>(src[batch_id * element_count + element_index]); } else { elements[it] = -std::numeric_limits<AccT>::infinity(); } } // 2.compute max_value. For each thread, loop all registers to find max AccT max_value = elements[0]; #pragma unroll for (int it = 1; it < warp_iter; ++it) { max_value = (max_value > elements[it]) ? max_value : elements[it]; } max_value = WarpReduceMax<AccT, kernel_warp_size>(max_value); // 3.For each warp, accumulate all thread registers AccT sum = 0.0f; #pragma unroll for (int it = 0; it < warp_iter; ++it) { sum += std::exp(elements[it] - max_value); } sum = WarpReduceSum<AccT, kernel_warp_size>(sum); // 4.store result. 
sum = std::log(sum); #pragma unroll for (int it = 0; it < warp_iter; ++it) { int element_index = thread_in_warp_idx + it * kernel_warp_size; if (element_index < effective_element_count) { dst[batch_id * element_count + element_index] = static_cast<T>(elements[it] - max_value - sum); } else { break; } } } template <typename T, typename AccT> void LaunchSoftmaxForwardForLastAxis(T *dst, const T *src, int dim_size, int outer_size, gpuStream_t stream) { int threads_per_block = 128; int near_greater_power_of_two = GetNearGreaterPowerOfTwo(dim_size); int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; int warps_per_block = (threads_per_block / kernel_warp_size); int blocks = (outer_size + warps_per_block - 1) / warps_per_block; dim3 threads(kernel_warp_size, warps_per_block, 1); switch (near_greater_power_of_two) { LAUNCH_WARP_FORWAR_COMPUTE(1); LAUNCH_WARP_FORWAR_COMPUTE(2); LAUNCH_WARP_FORWAR_COMPUTE(4); // dim_size: 3~4 LAUNCH_WARP_FORWAR_COMPUTE(8); // dim_size: 5~8 LAUNCH_WARP_FORWAR_COMPUTE(16); // dim_size: 9~16 LAUNCH_WARP_FORWAR_COMPUTE(32); // dim_size: 17~32 LAUNCH_WARP_FORWAR_COMPUTE(64); // dim_size: 33~64 LAUNCH_WARP_FORWAR_COMPUTE(128); // dim_size 65~128 LAUNCH_WARP_FORWAR_COMPUTE(256); // dim_size 129~256 LAUNCH_WARP_FORWAR_COMPUTE(512); // dim_size 257~512 LAUNCH_WARP_FORWAR_COMPUTE(1024); // dim_size 513~1024 default: break; } } template <typename T> class LogSoftmaxKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext &context) const override { const auto *x = context.Input<framework::Tensor>("X"); auto *out = context.Output<framework::Tensor>("Out"); const auto *input_data = x->data<T>(); auto *output_data = out->mutable_data<T>(context.GetPlace()); const int rank = x->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int dim_size = x->dims()[axis]; int inner_size = 1; for (int i = axis + 1; i < x->dims().size(); ++i) { inner_size *= x->dims()[i]; } int outer_size = SizeToAxis(axis, x->dims()); gpuStream_t stream = context.cuda_device_context().stream(); if (inner_size == 1 && dim_size <= 1024 && dim_size * sizeof(T) <= 4096) { LaunchSoftmaxForwardForLastAxis<T, MPDType>(output_data, input_data, dim_size, outer_size, stream); } else { LogSoftmaxFunctor<platform::CUDADeviceContext, T>()( context.template device_context<platform::CUDADeviceContext>(), x, out, axis); } } }; // Backward below #define LAUNCH_WARP_BACKWARD_COMPUTE(near_greater_power_of_two) \ case near_greater_power_of_two: \ ComputeLogSoftmaxBackwardInWarp< \ T, AccT, near_greater_power_of_two><<<blocks, threads, 0, stream>>>( \ output, grad_output, grad_input, outer_size, dim_size); \ break; template <typename T, typename AccT, int NearGreaterPowerOfTwo> __global__ void ComputeLogSoftmaxBackwardInWarp(const T *output, const T *grad_output, T *grad_input, int batch_size, int element_count) { constexpr int near_greater_power_of_two = NearGreaterPowerOfTwo; constexpr int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; constexpr int warp_iter = near_greater_power_of_two / kernel_warp_size; int batch_id = blockDim.y * blockIdx.x + threadIdx.y; int thread_in_warp_idx = threadIdx.x % kernel_warp_size; // 1.read data from global memory to registers AccT output_register[warp_iter]; AccT grad_output_register[warp_iter]; int effective_element_count = (batch_id < batch_size) ? 
element_count : 0; for (int iter = 0; iter < warp_iter; ++iter) { int element_index = thread_in_warp_idx + iter * kernel_warp_size; if (element_index < effective_element_count) { output_register[iter] = static_cast<AccT>(output[batch_id * element_count + element_index]); grad_output_register[iter] = static_cast<AccT>( grad_output[batch_id * element_count + element_index]); } else { output_register[iter] = AccT(0); grad_output_register[iter] = AccT(0); } } // 2. For each warp, accumulate all thread registers AccT sum = grad_output_register[0]; #pragma unroll for (int iter = 1; iter < warp_iter; ++iter) { sum += grad_output_register[iter]; } sum = WarpReduceSum<AccT, kernel_warp_size>(sum); // 3. write result in grad_input #pragma unroll for (int iter = 0; iter < warp_iter; ++iter) { int element_index = thread_in_warp_idx + iter * kernel_warp_size; if (element_index < effective_element_count) { grad_input[batch_id * element_count + element_index] = static_cast<T>( (grad_output_register[iter] - std::exp(output_register[iter]) * sum)); } } } template <typename T, typename AccT> void LaunchSoftmaxBackwardForLastAxis(T *grad_input, const T *grad_output, const T *output, int dim_size, int outer_size, gpuStream_t stream) { int threads_per_block = 128; int near_greater_power_of_two = GetNearGreaterPowerOfTwo(dim_size); int kernel_warp_size = (near_greater_power_of_two < 32) ? near_greater_power_of_two : 32; int warps_per_block = (threads_per_block / kernel_warp_size); int blocks = (outer_size + warps_per_block - 1) / warps_per_block; dim3 threads(kernel_warp_size, warps_per_block, 1); switch (near_greater_power_of_two) { LAUNCH_WARP_BACKWARD_COMPUTE(1); // dim_size: 1 LAUNCH_WARP_BACKWARD_COMPUTE(2); // dim_size: 2 LAUNCH_WARP_BACKWARD_COMPUTE(4); // dim_size: 3~4 LAUNCH_WARP_BACKWARD_COMPUTE(8); // dim_size: 5~8 LAUNCH_WARP_BACKWARD_COMPUTE(16); // dim_size: 9~16 LAUNCH_WARP_BACKWARD_COMPUTE(32); // dim_size: 17~32 LAUNCH_WARP_BACKWARD_COMPUTE(64); // dim_size: 33~64 LAUNCH_WARP_BACKWARD_COMPUTE(128); // dim_size: 65~128 LAUNCH_WARP_BACKWARD_COMPUTE(256); // dim_size: 129~256 LAUNCH_WARP_BACKWARD_COMPUTE(512); // dim_size: 257~512 LAUNCH_WARP_BACKWARD_COMPUTE(1024); // dim_size: 513~1024 default: break; } } template <typename T> class LogSoftmaxGradKernel<platform::CUDADeviceContext, T> : public framework::OpKernel<T> { using MPDType = typename details::MPTypeTrait<T>::Type; public: void Compute(const framework::ExecutionContext &context) const override { const auto *out = context.Input<framework::Tensor>("Out"); const auto *g_out = context.Input<framework::Tensor>(framework::GradVarName("Out")); auto *g_x = context.Output<framework::Tensor>(framework::GradVarName("X")); const auto *out_data = out->data<T>(); const auto *g_out_data = g_out->data<T>(); auto *g_x_data = g_x->mutable_data<T>(context.GetPlace()); const int rank = out->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int dim_size = out->dims()[axis]; int inner_size = 1; for (int i = axis + 1; i < out->dims().size(); ++i) { inner_size *= out->dims()[i]; } int outer_size = SizeToAxis(axis, out->dims()); gpuStream_t stream = context.cuda_device_context().stream(); if (inner_size == 1 && dim_size <= 1024 && dim_size * sizeof(T) <= 4096) { LaunchSoftmaxBackwardForLastAxis<T, MPDType>( g_x_data, g_out_data, out_data, dim_size, outer_size, stream); } else { LogSoftmaxGradFunctor<platform::CUDADeviceContext, T>()( context.template device_context<platform::CUDADeviceContext>(), out, g_out, g_x, axis); } } }; } // 
operators } // paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( log_softmax, ops::LogSoftmaxKernel<plat::CUDADeviceContext, float>, ops::LogSoftmaxKernel<plat::CUDADeviceContext, double>, ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::float16>); REGISTER_OP_CUDA_KERNEL( log_softmax_grad, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, float>, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, double>, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::float16>);
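// A small CPU reference one might use to spot-check LaunchSoftmaxForwardForLastAxis
// above: log_softmax(x)_i = x_i - max(x) - log(sum_j exp(x_j - max(x))), computed
// over one row. Names and the float element type are illustrative.
#include <algorithm>
#include <cmath>
#include <vector>

static std::vector<float> log_softmax_reference(const std::vector<float> &x)
{
    float max_value = x[0];
    for (float v : x) max_value = std::max(max_value, v);   // subtract the max for stability

    double sum = 0.0;
    for (float v : x) sum += std::exp(v - max_value);
    float log_sum = static_cast<float>(std::log(sum));

    std::vector<float> out(x.size());
    for (size_t i = 0; i < x.size(); ++i)
        out[i] = x[i] - max_value - log_sum;
    return out;
}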
f412a325c765dc666ac1b35b4f68cf9118dbde49.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. 
// You should wrap your allocation and copying statements like we've done in the // code we're supplying you. Here is an example of the unsafe way to allocate // memory on the GPU: // // hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <algorithm> __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; if(p.x >= numCols || p.y >= numRows) return; float color = 0.0f; for(int f_y = 0; f_y < filterWidth; f_y++) { for(int f_x = 0; f_x < filterWidth; f_x++) { int c_x = p.x + f_x - filterWidth/2; int c_y = p.y + f_y - filterWidth/2; c_x = min(max(c_x, 0), numCols - 1); c_y = min(max(c_y, 0), numRows - 1); float filter_value = filter[f_y*filterWidth + f_x]; color += filter_value*static_cast<float>(inputChannel[c_y*numCols + c_x]); } } outputChannel[m] = color; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. // If the position x or y is out of bounds we consider that the image continues with the // last valid pixel of that direction. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; if(p.x >= numCols || p.y >= numRows) return; redChannel[m] = inputImageRGBA[m].x; greenChannel[m] = inputImageRGBA[m].y; blueChannel[m] = inputImageRGBA[m].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(hipMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(hipMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with hipMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to hipMalloc checkCudaErrors(hipMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. hipMemcpy(dst, src, numBytes, hipMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(hipMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,hipMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32,32); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1); //TODO: Launch a kernel for separating the RGBA image into different color channels hipLaunchKernelGGL(( separateChannels), dim3(gridSize),dim3(blockSize), 0, 0, d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue ); // Call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); hipLaunchKernelGGL(( gaussian_blur), dim3(gridSize),dim3(blockSize), 0, 0, d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call hipDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(hipFree(d_red)); checkCudaErrors(hipFree(d_green)); checkCudaErrors(hipFree(d_blue)); }
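The comment block in the file above spells out the weighted-average blur and the border clamping it needs. As an added illustration (not part of the assignment's supplied code), a host-side reference of the same clamped convolution can be used to check one blurred channel at a time; the function name blurChannelCPU is made up for this sketch.

#include <algorithm>

// Reference blur for one color channel, mirroring the clamped weighted average that
// gaussian_blur computes per pixel. 'in' and 'out' hold numRows*numCols bytes; 'filter'
// holds filterWidth*filterWidth weights. (blurChannelCPU is a name made up for this sketch.)
static void blurChannelCPU(const unsigned char* in, unsigned char* out,
                           int numRows, int numCols,
                           const float* filter, int filterWidth)
{
  for (int y = 0; y < numRows; ++y) {
    for (int x = 0; x < numCols; ++x) {
      float color = 0.0f;
      for (int fy = 0; fy < filterWidth; ++fy) {
        for (int fx = 0; fx < filterWidth; ++fx) {
          // Clamp neighbor coordinates to the image, as the kernel does with min/max.
          int cx = std::min(std::max(x + fx - filterWidth / 2, 0), numCols - 1);
          int cy = std::min(std::max(y + fy - filterWidth / 2, 0), numRows - 1);
          color += filter[fy * filterWidth + fx] * static_cast<float>(in[cy * numCols + cx]);
        }
      }
      // Truncating conversion, matching the kernel's assignment to unsigned char.
      out[y * numCols + x] = static_cast<unsigned char>(color);
    }
  }
}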
f412a325c765dc666ac1b35b4f68cf9118dbde49.cu
// Homework 2 // Image Blurring // // In this homework we are blurring an image. To do this, imagine that we have // a square array of weight values. For each pixel in the image, imagine that we // overlay this square array of weights on top of the image such that the center // of the weight array is aligned with the current pixel. To compute a blurred // pixel value, we multiply each pair of numbers that line up. In other words, we // multiply each weight with the pixel underneath it. Finally, we add up all of the // multiplied numbers and assign that value to our output for the current pixel. // We repeat this process for all the pixels in the image. // To help get you started, we have included some useful notes here. //**************************************************************************** // For a color image that has multiple channels, we suggest separating // the different color channels so that each color is stored contiguously // instead of being interleaved. This will simplify your code. // That is instead of RGBARGBARGBARGBA... we suggest transforming to three // arrays (as in the previous homework we ignore the alpha channel again): // 1) RRRRRRRR... // 2) GGGGGGGG... // 3) BBBBBBBB... // // The original layout is known an Array of Structures (AoS) whereas the // format we are converting to is known as a Structure of Arrays (SoA). // As a warm-up, we will ask you to write the kernel that performs this // separation. You should then write the "meat" of the assignment, // which is the kernel that performs the actual blur. We provide code that // re-combines your blurred results for each color channel. //**************************************************************************** // You must fill in the gaussian_blur kernel to perform the blurring of the // inputChannel, using the array of weights, and put the result in the outputChannel. // Here is an example of computing a blur, using a weighted average, for a single // pixel in a small image. // // Array of weights: // // 0.0 0.2 0.0 // 0.2 0.2 0.2 // 0.0 0.2 0.0 // // Image (note that we align the array of weights to the center of the box): // // 1 2 5 2 0 3 // ------- // 3 |2 5 1| 6 0 0.0*2 + 0.2*5 + 0.0*1 + // | | // 4 |3 6 2| 1 4 -> 0.2*3 + 0.2*6 + 0.2*2 + -> 3.2 // | | // 0 |4 0 3| 4 2 0.0*4 + 0.2*0 + 0.0*3 // ------- // 9 6 5 0 3 9 // // (1) (2) (3) // // A good starting place is to map each thread to a pixel as you have before. // Then every thread can perform steps 2 and 3 in the diagram above // completely independently of one another. // Note that the array of weights is square, so its height is the same as its width. // We refer to the array of weights as a filter, and we refer to its width with the // variable filterWidth. //**************************************************************************** // Your homework submission will be evaluated based on correctness and speed. // We test each pixel against a reference solution. If any pixel differs by // more than some small threshold value, the system will tell you that your // solution is incorrect, and it will let you try again. // Once you have gotten that working correctly, then you can think about using // shared memory and having the threads cooperate to achieve better performance. //**************************************************************************** // Also note that we've supplied a helpful debugging function called checkCudaErrors. // You should wrap your allocation and copying statements like we've done in the // code we're supplying you. 
Here is an example of the unsafe way to allocate // memory on the GPU: // // cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols); // // Here is an example of the safe way to do the same thing: // // checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols)); // // Writing code the safe way requires slightly more typing, but is very helpful for // catching mistakes. If you write code the unsafe way and you make a mistake, then // any subsequent kernels won't compute anything, and it will be hard to figure out // why. Writing code the safe way will inform you as soon as you make a mistake. // Finally, remember to free the memory you allocate at the end of the function. //**************************************************************************** #include "utils.h" #include <algorithm> __global__ void gaussian_blur(const unsigned char* const inputChannel, unsigned char* const outputChannel, int numRows, int numCols, const float* const filter, const int filterWidth) { // TODO // NOTE: Be sure to compute any intermediate results in floating point // before storing the final result as unsigned char. // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; if(p.x >= numCols || p.y >= numRows) return; float color = 0.0f; for(int f_y = 0; f_y < filterWidth; f_y++) { for(int f_x = 0; f_x < filterWidth; f_x++) { int c_x = p.x + f_x - filterWidth/2; int c_y = p.y + f_y - filterWidth/2; c_x = min(max(c_x, 0), numCols - 1); c_y = min(max(c_y, 0), numRows - 1); float filter_value = filter[f_y*filterWidth + f_x]; color += filter_value*static_cast<float>(inputChannel[c_y*numCols + c_x]); } } outputChannel[m] = color; // NOTE: If a thread's absolute position 2D position is within the image, but some of // its neighbors are outside the image, then you will need to be extra careful. Instead // of trying to read such a neighbor value from GPU memory (which won't work because // the value is out of bounds), you should explicitly clamp the neighbor values you read // to be within the bounds of the image. If this is not clear to you, then please refer // to sequential reference solution for the exact clamping semantics you should follow. // If the position x or y is out of bounds we consider that the image continues with the // last valid pixel of that direction. } //This kernel takes in an image represented as a uchar4 and splits //it into three images consisting of only one color channel each __global__ void separateChannels(const uchar4* const inputImageRGBA, int numRows, int numCols, unsigned char* const redChannel, unsigned char* const greenChannel, unsigned char* const blueChannel) { // TODO // // NOTE: Be careful not to try to access memory that is outside the bounds of // the image. 
You'll want code that performs the following check before accessing // GPU memory: // // if ( absolute_image_position_x >= numCols || // absolute_image_position_y >= numRows ) // { // return; // } const int2 p = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int m = p.y * numCols + p.x; if(p.x >= numCols || p.y >= numRows) return; redChannel[m] = inputImageRGBA[m].x; greenChannel[m] = inputImageRGBA[m].y; blueChannel[m] = inputImageRGBA[m].z; } //This kernel takes in three color channels and recombines them //into one image. The alpha channel is set to 255 to represent //that this image has no transparency. __global__ void recombineChannels(const unsigned char* const redChannel, const unsigned char* const greenChannel, const unsigned char* const blueChannel, uchar4* const outputImageRGBA, int numRows, int numCols) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x; //make sure we don't try and access memory outside the image //by having any threads mapped there return early if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows) return; unsigned char red = redChannel[thread_1D_pos]; unsigned char green = greenChannel[thread_1D_pos]; unsigned char blue = blueChannel[thread_1D_pos]; //Alpha should be 255 for no transparency uchar4 outputPixel = make_uchar4(red, green, blue, 255); outputImageRGBA[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth) { //allocate memory for the three different channels //original checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * numRowsImage * numColsImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * numRowsImage * numColsImage)); //TODO: //Allocate memory for the filter on the GPU //Use the pointer d_filter that we have already declared for you //You need to allocate memory for the filter with cudaMalloc //be sure to use checkCudaErrors like the above examples to //be able to tell if anything goes wrong //IMPORTANT: Notice that we pass a pointer to a pointer to cudaMalloc checkCudaErrors(cudaMalloc(&d_filter, sizeof(float) * filterWidth * filterWidth)); //TODO: //Copy the filter on the host (h_filter) to the memory you just allocated //on the GPU. cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice); //Remember to use checkCudaErrors! checkCudaErrors(cudaMemcpy(d_filter,h_filter,sizeof(float)*filterWidth*filterWidth,cudaMemcpyHostToDevice)); } void your_gaussian_blur(const uchar4 * const h_inputImageRGBA, uchar4 * const d_inputImageRGBA, uchar4* const d_outputImageRGBA, const size_t numRows, const size_t numCols, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth) { //TODO: Set reasonable block size (i.e., number of threads per block) const dim3 blockSize(32,32); //TODO: //Compute correct grid size (i.e., number of blocks per kernel launch) //from the image size and and block size. 
const dim3 gridSize(numCols/blockSize.x + 1, numRows/blockSize.y + 1); //TODO: Launch a kernel for separating the RGBA image into different color channels separateChannels<<<gridSize,blockSize>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue ); // Call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); //TODO: Call your convolution kernel here 3 times, once for each color channel. gaussian_blur<<<gridSize,blockSize>>>(d_red,d_redBlurred,numRows,numCols,d_filter,filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize,blockSize>>>(d_green,d_greenBlurred,numRows,numCols,d_filter,filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gaussian_blur<<<gridSize,blockSize>>>(d_blue,d_blueBlurred,numRows,numCols,d_filter,filterWidth); // Again, call cudaDeviceSynchronize(), then call checkCudaErrors() immediately after // launching your kernel to make sure that you didn't make any mistakes. cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); // Now we recombine your results. We take care of launching this kernel for you. // // NOTE: This kernel launch depends on the gridSize and blockSize variables, // which you must set yourself. recombineChannels<<<gridSize, blockSize>>>(d_redBlurred, d_greenBlurred, d_blueBlurred, d_outputImageRGBA, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } //Free all the memory that we allocated //TODO: make sure you free any arrays that you allocated void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); }
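The notes above recommend wrapping every allocation and copy in checkCudaErrors. The real helper comes from utils.h; the sketch below only illustrates the usual shape of such a wrapper, and the macro name CHECK_CUDA is hypothetical.

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Minimal error-checking wrapper in the style the homework recommends.
#define CHECK_CUDA(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      fprintf(stderr, "CUDA error %s at %s:%d\n",                     \
              cudaGetErrorString(err_), __FILE__, __LINE__);          \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

// Usage, mirroring the "safe" allocation style described above:
//   unsigned char *d_red = nullptr;
//   CHECK_CUDA(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//   ...
//   CHECK_CUDA(cudaFree(d_red));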
6705e8ed329bfc0a04a1a81c8e768d413a6d223b.hip
// !!! This is a file automatically generated by hipify!!!
/*!
 * Copyright 2020-2022, XGBoost contributors
 */
#include "proxy_dmatrix.h"
#include "device_adapter_hip.cuh"

namespace xgboost {
namespace data {
void DMatrixProxy::FromCudaColumnar(StringView interface_str) {
  std::shared_ptr<data::CudfAdapter> adapter{new CudfAdapter{interface_str}};
  auto const& value = adapter->Value();
  this->batch_ = adapter;
  ctx_.gpu_id = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (ctx_.gpu_id < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

void DMatrixProxy::FromCudaArray(StringView interface_str) {
  std::shared_ptr<CupyAdapter> adapter(new CupyAdapter{StringView{interface_str}});
  this->batch_ = adapter;
  ctx_.gpu_id = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (ctx_.gpu_id < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}
}  // namespace data
}  // namespace xgboost
6705e8ed329bfc0a04a1a81c8e768d413a6d223b.cu
/*!
 * Copyright 2020-2022, XGBoost contributors
 */
#include "proxy_dmatrix.h"
#include "device_adapter.cuh"

namespace xgboost {
namespace data {
void DMatrixProxy::FromCudaColumnar(StringView interface_str) {
  std::shared_ptr<data::CudfAdapter> adapter{new CudfAdapter{interface_str}};
  auto const& value = adapter->Value();
  this->batch_ = adapter;
  ctx_.gpu_id = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (ctx_.gpu_id < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}

void DMatrixProxy::FromCudaArray(StringView interface_str) {
  std::shared_ptr<CupyAdapter> adapter(new CupyAdapter{StringView{interface_str}});
  this->batch_ = adapter;
  ctx_.gpu_id = adapter->DeviceIdx();
  this->Info().num_col_ = adapter->NumColumns();
  this->Info().num_row_ = adapter->NumRows();
  if (ctx_.gpu_id < 0) {
    CHECK_EQ(this->Info().num_row_, 0);
  }
}
}  // namespace data
}  // namespace xgboost
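Both code paths above record the adapter's device ordinal in ctx_.gpu_id and expect an empty batch when it is negative. As a loosely related illustration (not how the XGBoost adapters necessarily implement DeviceIdx), one common way to recover the device ordinal that owns a raw device pointer is cudaPointerGetAttributes; the helper name DeviceOrdinalOf is made up for this sketch.

#include <cuda_runtime.h>

// Returns the device ordinal owning 'ptr', or -1 for host/unregistered memory.
static int DeviceOrdinalOf(const void* ptr) {
  cudaPointerAttributes attr{};
  if (cudaPointerGetAttributes(&attr, ptr) != cudaSuccess) {
    cudaGetLastError();  // clear the error older CUDA versions report for plain host pointers
    return -1;
  }
  return (attr.type == cudaMemoryTypeDevice || attr.type == cudaMemoryTypeManaged)
             ? attr.device : -1;
}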
1c3a1c345780f5a2dd7cc25e0002acbef64ce609.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <time.h> #define NUM_THREADS 256 bool InitCUDA() { int count; hipGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, i) == hipSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } hipSetDevice(i); return true; } void matgen(float* a, int lda, int n) { int i, j; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { a[i * lda + j] = (float) rand() / RAND_MAX; } } } void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n) { int i, j, k; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { double t = 0; for(k = 0; k < n; k++) { t += a[i * lda + k] * b[k * ldb + j]; } c[i * ldc + j] = t; } } } void compare_mat(const float* a, int lda, const float* b, int ldb, int n) { float max_err = 0; float average_err = 0; int i, j; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { if(b[i * ldb + j] != 0) { float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]); if(max_err < err) max_err = err; average_err += err; } } } printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n)); } __global__ void matrixCUDA(float* a, float* b, float* c, int n) { int row = threadIdx.x+blockDim.x*blockIdx.x; int column = threadIdx.y+blockDim.y*blockIdx.y; int i; if(row < n && column < n) { float t = 0; float y = 0; for(i = 0; i < n; i++) { float r; y -= a[row * n + i] * b[i * n + column]; r = t - y; y = (r - t) + y; t = r; } c[row * n + column] = t; } } int main() { float *a, *b, *c, *d; int n = 1000; if(!InitCUDA()) return 0; a = (float*) malloc(sizeof(float) * n * n); b = (float*) malloc(sizeof(float) * n * n); c = (float*) malloc(sizeof(float) * n * n); d = (float*) malloc(sizeof(float) * n * n); srand(0); matgen(a, n, n); matgen(b, n, n); float *ac, *bc, *cc; clock_t start, time; start = clock(); hipMalloc((void**) &ac, sizeof(float) * n * n); hipMalloc((void**) &bc, sizeof(float) * n * n); hipMalloc((void**) &cc, sizeof(float) * n * n); hipMemcpy(ac, a, sizeof(float) * n * n, hipMemcpyHostToDevice); hipMemcpy(bc, b, sizeof(float) * n * n, hipMemcpyHostToDevice); dim3 block(32,32); dim3 grid((n-1)/block.x + 1,(n-1)/block.y + 1); hipLaunchKernelGGL(( matrixCUDA), dim3(grid),dim3(block), 0, 0, ac,bc,cc,n); hipMemcpy(c, cc, sizeof(float) * n * n, hipMemcpyDeviceToHost); hipFree(ac); hipFree(bc); hipFree(cc); time = clock() - start; clock_t startc, timec; startc = clock(); matmult(a, n, b, n, d, n, n); timec = clock() - startc; compare_mat(c, n, d, n, n); printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC); printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC); return 0; }
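The matrixCUDA kernel above accumulates each dot product with a compensated (Kahan-style) update, carrying the rounding error in y with opposite sign so that the float accumulator tracks the double-precision CPU reference in matmult. For comparison, the textbook form of Kahan summation on the host looks like the sketch below (an added illustration, not part of the file).

#include <cstddef>

static float kahanSum(const float* x, size_t count)
{
    float sum = 0.0f;  // running total
    float c   = 0.0f;  // compensation: low-order bits lost in the previous addition
    for (size_t i = 0; i < count; ++i) {
        float y = x[i] - c;   // correct the next term by the carried error
        float t = sum + y;    // big + small: low-order bits of y can be lost here...
        c = (t - sum) - y;    // ...recover them algebraically
        sum = t;
    }
    return sum;
}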
1c3a1c345780f5a2dd7cc25e0002acbef64ce609.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <time.h> #define NUM_THREADS 256 bool InitCUDA() { int count; cudaGetDeviceCount(&count); if(count == 0) { fprintf(stderr, "There is no device.\n"); return false; } int i; for(i = 0; i < count; i++) { cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, i) == cudaSuccess) { if(prop.major >= 1) { break; } } } if(i == count) { fprintf(stderr, "There is no device supporting CUDA 1.x.\n"); return false; } cudaSetDevice(i); return true; } void matgen(float* a, int lda, int n) { int i, j; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { a[i * lda + j] = (float) rand() / RAND_MAX; } } } void matmult(const float* a, int lda, const float* b, int ldb, float* c, int ldc, int n) { int i, j, k; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { double t = 0; for(k = 0; k < n; k++) { t += a[i * lda + k] * b[k * ldb + j]; } c[i * ldc + j] = t; } } } void compare_mat(const float* a, int lda, const float* b, int ldb, int n) { float max_err = 0; float average_err = 0; int i, j; for(i = 0; i < n; i++) { for(j = 0; j < n; j++) { if(b[i * ldb + j] != 0) { float err = fabs((a[i * lda + j] - b[i * ldb + j]) / b[i * ldb + j]); if(max_err < err) max_err = err; average_err += err; } } } printf("Max error: %g Average error: %g\n", max_err, average_err / (n * n)); } __global__ void matrixCUDA(float* a, float* b, float* c, int n) { int row = threadIdx.x+blockDim.x*blockIdx.x; int column = threadIdx.y+blockDim.y*blockIdx.y; int i; if(row < n && column < n) { float t = 0; float y = 0; for(i = 0; i < n; i++) { float r; y -= a[row * n + i] * b[i * n + column]; r = t - y; y = (r - t) + y; t = r; } c[row * n + column] = t; } } int main() { float *a, *b, *c, *d; int n = 1000; if(!InitCUDA()) return 0; a = (float*) malloc(sizeof(float) * n * n); b = (float*) malloc(sizeof(float) * n * n); c = (float*) malloc(sizeof(float) * n * n); d = (float*) malloc(sizeof(float) * n * n); srand(0); matgen(a, n, n); matgen(b, n, n); float *ac, *bc, *cc; clock_t start, time; start = clock(); cudaMalloc((void**) &ac, sizeof(float) * n * n); cudaMalloc((void**) &bc, sizeof(float) * n * n); cudaMalloc((void**) &cc, sizeof(float) * n * n); cudaMemcpy(ac, a, sizeof(float) * n * n, cudaMemcpyHostToDevice); cudaMemcpy(bc, b, sizeof(float) * n * n, cudaMemcpyHostToDevice); dim3 block(32,32); dim3 grid((n-1)/block.x + 1,(n-1)/block.y + 1); matrixCUDA<<<grid,block>>>(ac,bc,cc,n); cudaMemcpy(c, cc, sizeof(float) * n * n, cudaMemcpyDeviceToHost); cudaFree(ac); cudaFree(bc); cudaFree(cc); time = clock() - start; clock_t startc, timec; startc = clock(); matmult(a, n, b, n, d, n, n); timec = clock() - startc; compare_mat(c, n, d, n, n); printf("GPU time used: %f \n", (double) time / CLOCKS_PER_SEC); printf("CPU time used: %f \n", (double) timec / CLOCKS_PER_SEC); return 0; }
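The timing in main() above wraps clock() around the allocations, the copies, and the kernel together. As an added sketch (not part of the original program), CUDA events can time just the kernel; the helper assumes the matrixCUDA kernel defined above is in scope and that the caller passes the same device buffers and launch configuration main() sets up.

#include <cuda_runtime.h>

// Times a single matrixCUDA launch in milliseconds using CUDA events.
static float timeMatrixKernelMs(float* ac, float* bc, float* cc, int n,
                                dim3 grid, dim3 block)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    matrixCUDA<<<grid, block>>>(ac, bc, cc, n);   // same launch as in main() above
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);                   // wait for the recorded work to finish
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}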
a597245b993f552f35edfb950af65817509cc67a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>
#include "reorg_cuda_kernel.h"

#define BLOCK 512

dim3 cuda_gridsize(int n)
{
    int k = (n-1) / BLOCK + 1;
    int x = k;
    int y = 1;
    if(x > 65535){
        x = ceil(sqrt(k));
        y = (n-1)/(x*BLOCK) + 1;
    }
    dim3 d(x, y, 1);
    //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
    return d;
}

__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int in_index = i;
    int in_w = i%w;
    i = i/w;
    int in_h = i%h;
    i = i/h;
    int in_c = i%c;
    i = i/c;
    int b = i%batch;

    int out_c = c/(stride*stride);

    int c2 = in_c % out_c;
    int offset = in_c / out_c;
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;
    //printf("%d\n", offset);
    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
    // printf("%d %d %d\n", w2, h2, c2);
    //printf("%d %d\n", in_index, out_index);
    //if(out_index >= N || out_index < 0) printf("bad bad bad \n");

    if(forward) out[out_index] = x[in_index];
    else out[in_index] = x[out_index];
    //if(forward) out[1] = x[1];
    //else out[0] = x[0];
}

void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out, hipStream_t stream)
{
    int size = w*h*c*batch;
    hipError_t err;
    hipLaunchKernelGGL(( reorg_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, stream, size, x, w, h, c, batch, stride, forward, out);
    err = hipGetLastError();
    if(hipSuccess != err) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
        exit( -1 );
    }
}

#ifdef __cplusplus
}
#endif
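cuda_gridsize above folds a 1-D launch into a 2-D grid once the block count would exceed 65535, and reorg_kernel rebuilds the flat index from blockIdx.x + blockIdx.y*gridDim.x. The following small host-side check (an added sketch, not part of the file) verifies the property this relies on: every grid dimension stays within the 65535 limit the code assumes, while the launch still covers all N elements.

#include <assert.h>
#include <math.h>

static void check_gridsize(int n)
{
    const int block = 512;                  // same as BLOCK in the file above
    int k = (n - 1) / block + 1;
    int x = k, y = 1;
    if (x > 65535) {
        x = (int)ceil(sqrt((double)k));
        y = (n - 1) / (x * block) + 1;
    }
    assert(x <= 65535 && y <= 65535);       // fits the per-dimension limit the code assumes
    assert((long long)x * y * block >= n);  // enough threads to index every element
}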
a597245b993f552f35edfb950af65817509cc67a.cu
#ifdef __cplusplus
extern "C" {
#endif

#include <stdio.h>
#include <math.h>
#include <float.h>
#include "reorg_cuda_kernel.h"

#define BLOCK 512

dim3 cuda_gridsize(int n)
{
    int k = (n-1) / BLOCK + 1;
    int x = k;
    int y = 1;
    if(x > 65535){
        x = ceil(sqrt(k));
        y = (n-1)/(x*BLOCK) + 1;
    }
    dim3 d(x, y, 1);
    //printf("%ld %ld %ld %ld\n", n, x, y, x*y*BLOCK);
    return d;
}

__global__ void reorg_kernel(int N, float *x, int w, int h, int c, int batch, int stride, int forward, float *out)
{
    int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if(i >= N) return;
    int in_index = i;
    int in_w = i%w;
    i = i/w;
    int in_h = i%h;
    i = i/h;
    int in_c = i%c;
    i = i/c;
    int b = i%batch;

    int out_c = c/(stride*stride);

    int c2 = in_c % out_c;
    int offset = in_c / out_c;
    int w2 = in_w*stride + offset % stride;
    int h2 = in_h*stride + offset / stride;
    //printf("%d\n", offset);
    int out_index = w2 + w*stride*(h2 + h*stride*(c2 + out_c*b));
    // printf("%d %d %d\n", w2, h2, c2);
    //printf("%d %d\n", in_index, out_index);
    //if(out_index >= N || out_index < 0) printf("bad bad bad \n");

    if(forward) out[out_index] = x[in_index];
    else out[in_index] = x[out_index];
    //if(forward) out[1] = x[1];
    //else out[0] = x[0];
}

void reorg_ongpu(float *x, int w, int h, int c, int batch, int stride, int forward, float *out, cudaStream_t stream)
{
    int size = w*h*c*batch;
    cudaError_t err;
    reorg_kernel<<<cuda_gridsize(size), BLOCK, 0, stream>>>(size, x, w, h, c, batch, stride, forward, out);
    err = cudaGetLastError();
    if(cudaSuccess != err) {
        fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
        exit( -1 );
    }
}

#ifdef __cplusplus
}
#endif
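For testing the index mapping on small tensors, a plain CPU reference that mirrors reorg_kernel's arithmetic can be compared element-for-element against the GPU output. This reference is an added sketch, not part of the file above.

// Host-side reorg with the same index math as reorg_kernel; 'forward' selects the
// gather direction exactly as in the kernel.
static void reorg_cpu(const float* x, int w, int h, int c, int batch,
                      int stride, int forward, float* out)
{
    int out_c = c / (stride * stride);
    for (int b = 0; b < batch; ++b) {
        for (int in_c = 0; in_c < c; ++in_c) {
            for (int in_h = 0; in_h < h; ++in_h) {
                for (int in_w = 0; in_w < w; ++in_w) {
                    int in_index = in_w + w * (in_h + h * (in_c + c * b));
                    int c2 = in_c % out_c;
                    int offset = in_c / out_c;
                    int w2 = in_w * stride + offset % stride;
                    int h2 = in_h * stride + offset / stride;
                    int out_index = w2 + w * stride * (h2 + h * stride * (c2 + out_c * b));
                    if (forward) out[out_index] = x[in_index];
                    else         out[in_index]  = x[out_index];
                }
            }
        }
    }
}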
90974f8b42b513547dada9f572038c72f02737c9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "radial_utilities.h" #include "vector_td_operators.h" #include "vector_td_utilities.h" #include "real_utilities.h" #include "real_utilities_device.h" #include "check_CUDA.h" #include <math_constants.h> #include <vector> #include <iostream> using namespace std; namespace Gadgetron{ template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE> __inline__ __device__ REAL get_angle_step_GR(); template<> __inline__ __device__ float get_angle_step_GR<float,0>(){ return CUDART_PI_F*(3.0f-::sqrtf(5.0f))*0.5f; } // GR_SMALLEST template<> __inline__ __device__ float get_angle_step_GR<float,1>(){ return CUDART_PI_F/((::sqrtf(5.0f)+1.0f)*0.5f); } // GR_ORIGINAL template<> __inline__ __device__ double get_angle_step_GR<double,0>(){ return CUDART_PI*(3.0-::sqrt(5.0))*0.5; } // GR_SMALLEST template<> __inline__ __device__ double get_angle_step_GR<double,1>(){ return CUDART_PI/((::sqrt(5.0)+1.0)*0.5); } // GR_ORIGINAL template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE> __global__ void compute_radial_trajectory_golden_ratio_2d_kernel( typename reald<REAL,2>::Type *co, REAL angular_offset ) { const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const REAL profile = (REAL)blockIdx.x; const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); REAL cos_angle, sin_angle; gad_sincos<REAL>( (profile+angular_offset)*angle_step+get_pi<REAL>(), &sin_angle, &cos_angle ); typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos_angle/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin_angle/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_golden_ratio_2d( unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, unsigned int profile_offset, GOLDEN_RATIO_ANGULAR_STEP_SIZE mode ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; hipGetDevice( &device ); hipDeviceProp_t deviceProp; hipGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "compute_radial_trajectory_golden_ratio_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); if(!co.get()){ cout << endl << "Error:: compute_radial_trajectory_golden_ratio_2d: memory allocation failed." << endl; return boost::shared_ptr< cuNDArray<T> >(); } // Set dimensions of grid/blocks. dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame*num_frames ); // Invoke kernel (nvcc has been protesting heavily on various other ways to do this...) 
if( mode == GR_SMALLEST ) hipLaunchKernelGGL(( compute_radial_trajectory_golden_ratio_2d_kernel<REAL,0>), dim3(dimGrid), dim3(dimBlock) , 0, 0, co->get_data_ptr(), (REAL)profile_offset ); else hipLaunchKernelGGL(( compute_radial_trajectory_golden_ratio_2d_kernel<REAL,1>), dim3(dimGrid), dim3(dimBlock) , 0, 0, co->get_data_ptr(), (REAL)profile_offset ); CHECK_FOR_CUDA_ERROR(); return co; } template<class REAL> __global__ static void compute_radial_trajectory_variable_angle_2d_kernel( typename reald<REAL,2>::Type *co,REAL* angles, REAL one_over_num_profiles_per_frame, REAL one_over_num_frames ) { const unsigned int index = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const int frame = blockIdx.y; typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos(angles[frame])/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin(angles[frame])/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_variable_angle_2d(cuNDArray<REAL>* angles, unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, REAL angular_offset ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; hipGetDevice( &device ); hipDeviceProp_t deviceProp; hipGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_trajectory_fixed_angle_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); // Set dimensions of grid/blocks. 
dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame, num_frames ); // Invoke kernel hipLaunchKernelGGL(( compute_radial_trajectory_variable_angle_2d_kernel<REAL>), dim3(dimGrid), dim3(dimBlock) , 0, 0, co->get_data_ptr(), angles->get_data_ptr(),REAL(1)/(REAL)num_profiles_per_frame, REAL(1)/(REAL)num_frames); CHECK_FOR_CUDA_ERROR(); return co; } template<class REAL> __global__ void compute_radial_trajectory_fixed_angle_2d_kernel( typename reald<REAL,2>::Type *co, REAL one_over_num_profiles_per_frame, REAL one_over_num_frames, REAL angular_offset ) { const unsigned int index = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const REAL lprofile = (REAL)blockIdx.x; const REAL frame = (REAL)blockIdx.y; REAL cos_angle, sin_angle; gad_sincos<REAL>( (lprofile+frame*one_over_num_frames)*one_over_num_profiles_per_frame*get_pi<REAL>()+angular_offset+get_pi<REAL>(), &sin_angle, &cos_angle ); typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos_angle/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin_angle/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_fixed_angle_2d( unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, REAL angular_offset ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; hipGetDevice( &device ); hipDeviceProp_t deviceProp; hipGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_trajectory_fixed_angle_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); // Set dimensions of grid/blocks. 
dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame, num_frames ); // Invoke kernel hipLaunchKernelGGL(( compute_radial_trajectory_fixed_angle_2d_kernel<REAL>), dim3(dimGrid), dim3(dimBlock) , 0, 0, co->get_data_ptr(), REAL(1)/(REAL)num_profiles_per_frame, REAL(1)/(REAL)num_frames, angular_offset ); CHECK_FOR_CUDA_ERROR(); return co; } // Find the (eight) neighbors to a given radial sample index template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> __inline__ __device__ typename reald<REAL,2>::Type compute_radial_neighbors( REAL sample_idx_on_profile, REAL angular_offset, REAL alpha, REAL one_over_radial_oversampling_factor, REAL one_over_num_profiles, REAL bias, REAL samples_per_profile, REAL profile, REAL num_profiles, typename reald<REAL,2>::Type * __restrict__ p1, typename reald<REAL,2>::Type * __restrict__ p2, typename reald<REAL,2>::Type * __restrict__ p3, typename reald<REAL,2>::Type * __restrict__ p4, typename reald<REAL,2>::Type * __restrict__ p5, typename reald<REAL,2>::Type * __restrict__ p6, typename reald<REAL,2>::Type * __restrict__ p7, typename reald<REAL,2>::Type * __restrict__ p8 ) { // The sample positions (scales) can be either of the _local_ indices 'sample_idx_on_profile' or 'samples_per_projection'-'sample_idx_on_profile' // Beware of "skewness" around the origin, i.e. +1 sample one one side const REAL ctr_scale = alpha*((sample_idx_on_profile-bias)*one_over_radial_oversampling_factor); const REAL ctr_scale_inv = alpha*((samples_per_profile-sample_idx_on_profile-bias)*one_over_radial_oversampling_factor); const REAL prev_scale = alpha*((sample_idx_on_profile-bias-1)*one_over_radial_oversampling_factor); const REAL prev_scale_inv = alpha*((samples_per_profile-(sample_idx_on_profile-1)-bias)*one_over_radial_oversampling_factor); const REAL next_scale = alpha*((sample_idx_on_profile-bias+1)*one_over_radial_oversampling_factor); const REAL next_scale_inv = alpha*((samples_per_profile-(sample_idx_on_profile+1)-bias)*one_over_radial_oversampling_factor); // Unit circle position for current projection REAL cos_angle, sin_angle; switch(GR){ case true: // golden ratio { const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); gad_sincos<REAL>( (profile+angular_offset)*angle_step, &sin_angle, &cos_angle ); } break; case false: // fixed angle { gad_sincos<REAL>( profile*one_over_num_profiles*get_pi<REAL>(), &sin_angle, &cos_angle ); } break; } // Find the normal to the current projection direction typename reald<REAL,2>::Type normal; normal.vec[0] = -sin_angle; normal.vec[1] = cos_angle; // The position of the idx itself typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = ctr_scale*cos_angle; sample_pos.vec[1] = ctr_scale*sin_angle; // The positions of the previous and next sample (*p1).vec[0] = prev_scale*cos_angle; (*p1).vec[1] = prev_scale*sin_angle; (*p2).vec[0] = next_scale*cos_angle; (*p2).vec[1] = next_scale*sin_angle; // Initialize remaining points; (*p3).vec[0] = (*p4).vec[0] = (*p5).vec[0] = (*p6).vec[0] = (*p7).vec[0] = (*p8).vec[0] = (*p3).vec[1] = (*p4).vec[1] = (*p5).vec[1] = (*p6).vec[1] = (*p7).vec[1] = (*p8).vec[1] = get_max<REAL>(); // far away... 
// Run through all projections to find the closests neighbors for( unsigned int i=0; i<num_profiles; i++ ){ if( i == profile ) continue; // Unit circle position projection 'i' switch(GR) { case true: { const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); gad_sincos<REAL>( ((REAL)i+angular_offset)*angle_step, &sin_angle, &cos_angle ); } break; case false: { gad_sincos<REAL>( (REAL)i*one_over_num_profiles*get_pi<REAL>(), &sin_angle, &cos_angle ); } break; } // Determine sample positions on projection typename reald<REAL,2>::Type prev_pos_1; prev_pos_1.vec[0] = prev_scale*cos_angle; prev_pos_1.vec[1] = prev_scale*sin_angle; typename reald<REAL,2>::Type prev_pos_2; prev_pos_2.vec[0] = prev_scale_inv*cos_angle; prev_pos_2.vec[1] = prev_scale_inv*sin_angle; typename reald<REAL,2>::Type ctr_pos_1; ctr_pos_1.vec[0] = ctr_scale*cos_angle; ctr_pos_1.vec[1] = ctr_scale*sin_angle; typename reald<REAL,2>::Type ctr_pos_2; ctr_pos_2.vec[0] = ctr_scale_inv*cos_angle; ctr_pos_2.vec[1] = ctr_scale_inv*sin_angle; typename reald<REAL,2>::Type next_pos_1; next_pos_1.vec[0] = next_scale*cos_angle; next_pos_1.vec[1] = next_scale*sin_angle; typename reald<REAL,2>::Type next_pos_2; next_pos_2.vec[0] = next_scale_inv*cos_angle; next_pos_2.vec[1] = next_scale_inv*sin_angle; // The dot product is used to ensure we find a neighbor on each side if( dot<REAL,2>(ctr_pos_1-sample_pos, normal) > REAL(0) ){ if( norm_squared<REAL>(ctr_pos_1-sample_pos) < norm_squared<REAL>(*p4-sample_pos) ){ *p3 = prev_pos_1; *p4 = ctr_pos_1; *p5 = next_pos_1; } } else{ if( norm_squared<REAL>(ctr_pos_1-sample_pos) < norm_squared<REAL>(*p7-sample_pos) ){ *p6 = prev_pos_1; *p7 = ctr_pos_1; *p8 = next_pos_1; } } // The dot product is used to ensure we find a neighbor on each side if( dot<REAL,2>(ctr_pos_2-sample_pos, normal) > REAL(0) ){ if( norm_squared<REAL>(ctr_pos_2-sample_pos) < norm_squared<REAL>(*p4-sample_pos) ){ *p3 = prev_pos_2; *p4 = ctr_pos_2; *p5 = next_pos_2; } } else{ if( norm_squared<REAL>(ctr_pos_2-sample_pos) < norm_squared<REAL>(*p7-sample_pos) ){ *p6 = prev_pos_2; *p7 = ctr_pos_2; *p8 = next_pos_2; } } } return sample_pos; } template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> __global__ void compute_radial_dcw_2d_kernel( REAL alpha, REAL one_over_radial_oversampling_factor, REAL one_over_num_profiles, REAL angular_offset, REAL *dcw ) { const REAL samples_per_profile = (REAL) (blockDim.x<<1); const REAL sample_idx_on_profile = (REAL)(blockIdx.x*blockDim.x+threadIdx.x); const REAL num_profiles = (REAL)gridDim.y; const REAL profile = (REAL)blockIdx.y; const REAL bias = samples_per_profile*REAL(0.5); const unsigned int index = blockIdx.y*samples_per_profile + sample_idx_on_profile; REAL weight; if( sample_idx_on_profile == blockDim.x ){ // Special case - center of profile/k-space const REAL radius = (alpha*one_over_radial_oversampling_factor)*REAL(0.5); const REAL area = radius*radius*get_pi<REAL>(); weight = area/num_profiles; } else{ // General case - all neighbors exist // Compute sample positions for the current sample and all neighbors // The ordering of p1..p8 in the call below follows the edge of the "Voronoi polygon" typename reald<REAL,2>::Type sample_pos; typename reald<REAL,2>::Type p1, p2, p3, p4, p5, p6, p7, p8; sample_pos = compute_radial_neighbors<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE,GR> ( sample_idx_on_profile, angular_offset, alpha, one_over_radial_oversampling_factor, one_over_num_profiles, bias, samples_per_profile, profile, num_profiles, &p1, &p5, &p2, &p3, 
&p4, &p8, &p7, &p6 ); // Find midpoints of lines from sample_pos to all other points. p1 = REAL(0.5)*(sample_pos+p1); // computing "sample_pos+(p1-sample_pos)/2" p2 = REAL(0.5)*(sample_pos+p2); p3 = REAL(0.5)*(sample_pos+p3); p4 = REAL(0.5)*(sample_pos+p4); p5 = REAL(0.5)*(sample_pos+p5); p6 = REAL(0.5)*(sample_pos+p6); p7 = REAL(0.5)*(sample_pos+p7); p8 = REAL(0.5)*(sample_pos+p8); // The weight is determined by the area of the polygon (http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/) weight = REAL(0.5)* ((p1.vec[0]*p2.vec[1]-p2.vec[0]*p1.vec[1])+ (p2.vec[0]*p3.vec[1]-p3.vec[0]*p2.vec[1])+ (p3.vec[0]*p4.vec[1]-p4.vec[0]*p3.vec[1])+ (p4.vec[0]*p5.vec[1]-p5.vec[0]*p4.vec[1])+ (p5.vec[0]*p6.vec[1]-p6.vec[0]*p5.vec[1])+ (p6.vec[0]*p7.vec[1]-p7.vec[0]*p6.vec[1])+ (p7.vec[0]*p8.vec[1]-p8.vec[0]*p7.vec[1])+ (p8.vec[0]*p1.vec[1]-p1.vec[0]*p8.vec[1])); if( weight<REAL(0) ) weight *= -REAL(1); } dcw[index] = weight; } template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor, unsigned int profile_offset = 0 ) { if( num_profiles < 4 ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: use at least four profiles" << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } // Get device properties int device; hipGetDevice( &device ); hipDeviceProp_t deviceProp; hipGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( samples_per_profile%2 ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: samples/profile must be even." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } if( samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: samples/profile number a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } unsigned int number_of_samples = samples_per_profile * num_profiles; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples ); boost::shared_ptr< cuNDArray<REAL> > dcw( new cuNDArray<REAL>(&dims) ); if(!dcw.get()){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: memory allocation failed." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } // Set dimensions of grid/blocks. 
(division by two due to resource limitations) dim3 dimBlock( samples_per_profile>>1 ); dim3 dimGrid( 2, num_profiles ); // Invoke kernel hipLaunchKernelGGL(( compute_radial_dcw_2d_kernel<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE,GR>), dim3(dimGrid), dim3(dimBlock) , 0, 0, alpha, one_over_radial_oversampling_factor, REAL(1)/(REAL)num_profiles, (REAL)profile_offset, dcw->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); return dcw; } template<class REAL> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_golden_ratio_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor, unsigned int profile_offset, GOLDEN_RATIO_ANGULAR_STEP_SIZE mode) { if( mode == GR_SMALLEST ) return compute_radial_dcw_2d<REAL,0,true> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor, profile_offset ); else if( mode == GR_ORIGINAL ) return compute_radial_dcw_2d<REAL,1,true> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor, profile_offset ); else throw std::runtime_error("\ncompute_radial_dcw_golden_ratio_2d() :: unexpected mode\n"); } template<class REAL> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_fixed_angle_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor ) { // The golden ratio template type is ignored when the tailing template argument is false return compute_radial_dcw_2d<REAL,GR_ORIGINAL,false> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor ); } // // Instantiation // template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<float,2>::Type > > compute_radial_trajectory_fixed_angle_2d<float>( unsigned int, unsigned int, unsigned int, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<double,2>::Type > > compute_radial_trajectory_fixed_angle_2d<double>( unsigned int, unsigned int, unsigned int, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<float,2>::Type > > compute_radial_trajectory_golden_ratio_2d<float>( unsigned int, unsigned int, unsigned int, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<double,2>::Type > > compute_radial_trajectory_golden_ratio_2d<double>( unsigned int, unsigned int, unsigned int, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> >compute_radial_dcw_fixed_angle_2d<float>( unsigned int, unsigned int, float, float); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> >compute_radial_dcw_fixed_angle_2d<double>( unsigned int, unsigned int, double, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > compute_radial_dcw_golden_ratio_2d<float>( unsigned int, unsigned int, float, float, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > compute_radial_dcw_golden_ratio_2d<double>( unsigned int, unsigned int, double, double, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); }
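The density-compensation kernel above combines two ingredients: the golden-ratio angular step selected by GOLDEN_RATIO_ANGULAR_STEP_SIZE, and the area of the eight-point "Voronoi polygon", which it evaluates with the shoelace formula cited in the comment. Below is a minimal host-side sketch of both pieces (added for illustration; the function names are made up, and the step constants are copied from the specializations above).

#include <cmath>

// GR_ORIGINAL step: pi / ((sqrt(5)+1)/2); GR_SMALLEST step: pi*(3-sqrt(5))/2.
static double golden_ratio_angle(unsigned int profile, bool smallest)
{
    const double pi = 3.14159265358979323846;
    const double step = smallest ? pi * (3.0 - std::sqrt(5.0)) * 0.5
                                 : pi / ((std::sqrt(5.0) + 1.0) * 0.5);
    return profile * step;
}

// Shoelace formula for the area of a simple polygon with vertices (x[i], y[i]) in order.
static double polygon_area(const double* x, const double* y, int n)
{
    double twice_area = 0.0;
    for (int i = 0; i < n; ++i) {
        int j = (i + 1) % n;
        twice_area += x[i] * y[j] - x[j] * y[i];
    }
    return std::fabs(twice_area) * 0.5;
}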
90974f8b42b513547dada9f572038c72f02737c9.cu
#include "radial_utilities.h" #include "vector_td_operators.h" #include "vector_td_utilities.h" #include "real_utilities.h" #include "real_utilities_device.h" #include "check_CUDA.h" #include <math_constants.h> #include <vector> #include <iostream> using namespace std; namespace Gadgetron{ template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE> __inline__ __device__ REAL get_angle_step_GR(); template<> __inline__ __device__ float get_angle_step_GR<float,0>(){ return CUDART_PI_F*(3.0f-::sqrtf(5.0f))*0.5f; } // GR_SMALLEST template<> __inline__ __device__ float get_angle_step_GR<float,1>(){ return CUDART_PI_F/((::sqrtf(5.0f)+1.0f)*0.5f); } // GR_ORIGINAL template<> __inline__ __device__ double get_angle_step_GR<double,0>(){ return CUDART_PI*(3.0-::sqrt(5.0))*0.5; } // GR_SMALLEST template<> __inline__ __device__ double get_angle_step_GR<double,1>(){ return CUDART_PI/((::sqrt(5.0)+1.0)*0.5); } // GR_ORIGINAL template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE> __global__ void compute_radial_trajectory_golden_ratio_2d_kernel( typename reald<REAL,2>::Type *co, REAL angular_offset ) { const unsigned int index = blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const REAL profile = (REAL)blockIdx.x; const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); REAL cos_angle, sin_angle; gad_sincos<REAL>( (profile+angular_offset)*angle_step+get_pi<REAL>(), &sin_angle, &cos_angle ); typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos_angle/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin_angle/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_golden_ratio_2d( unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, unsigned int profile_offset, GOLDEN_RATIO_ANGULAR_STEP_SIZE mode ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; cudaGetDevice( &device ); cudaDeviceProp deviceProp; cudaGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "compute_radial_trajectory_golden_ratio_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); if(!co.get()){ cout << endl << "Error:: compute_radial_trajectory_golden_ratio_2d: memory allocation failed." << endl; return boost::shared_ptr< cuNDArray<T> >(); } // Set dimensions of grid/blocks. dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame*num_frames ); // Invoke kernel (nvcc has been protesting heavily on various other ways to do this...) 
if( mode == GR_SMALLEST ) compute_radial_trajectory_golden_ratio_2d_kernel<REAL,0><<< dimGrid, dimBlock >>> ( co->get_data_ptr(), (REAL)profile_offset ); else compute_radial_trajectory_golden_ratio_2d_kernel<REAL,1><<< dimGrid, dimBlock >>> ( co->get_data_ptr(), (REAL)profile_offset ); CHECK_FOR_CUDA_ERROR(); return co; } template<class REAL> __global__ static void compute_radial_trajectory_variable_angle_2d_kernel( typename reald<REAL,2>::Type *co,REAL* angles, REAL one_over_num_profiles_per_frame, REAL one_over_num_frames ) { const unsigned int index = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const int frame = blockIdx.y; typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos(angles[frame])/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin(angles[frame])/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_variable_angle_2d(cuNDArray<REAL>* angles, unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, REAL angular_offset ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; cudaGetDevice( &device ); cudaDeviceProp deviceProp; cudaGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_trajectory_fixed_angle_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); // Set dimensions of grid/blocks. 
dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame, num_frames ); // Invoke kernel compute_radial_trajectory_variable_angle_2d_kernel<REAL><<< dimGrid, dimBlock >>> ( co->get_data_ptr(), angles->get_data_ptr(),REAL(1)/(REAL)num_profiles_per_frame, REAL(1)/(REAL)num_frames); CHECK_FOR_CUDA_ERROR(); return co; } template<class REAL> __global__ void compute_radial_trajectory_fixed_angle_2d_kernel( typename reald<REAL,2>::Type *co, REAL one_over_num_profiles_per_frame, REAL one_over_num_frames, REAL angular_offset ) { const unsigned int index = blockIdx.y*gridDim.x*blockDim.x + blockIdx.x*blockDim.x + threadIdx.x; const REAL samples_per_profile = (REAL) blockDim.x; const REAL bias = samples_per_profile * REAL(0.5); const REAL sample_idx_on_profile = (REAL)threadIdx.x; const REAL lprofile = (REAL)blockIdx.x; const REAL frame = (REAL)blockIdx.y; REAL cos_angle, sin_angle; gad_sincos<REAL>( (lprofile+frame*one_over_num_frames)*one_over_num_profiles_per_frame*get_pi<REAL>()+angular_offset+get_pi<REAL>(), &sin_angle, &cos_angle ); typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = (sample_idx_on_profile-bias)*cos_angle/samples_per_profile; sample_pos.vec[1] = (sample_idx_on_profile-bias)*sin_angle/samples_per_profile; co[index] = sample_pos; } template<class REAL> boost::shared_ptr< cuNDArray< typename reald<REAL,2>::Type > > compute_radial_trajectory_fixed_angle_2d( unsigned int num_samples_per_profile, unsigned int num_profiles_per_frame, unsigned int num_frames, REAL angular_offset ) { typedef typename reald<REAL,2>::Type T; // Get device properties int device; cudaGetDevice( &device ); cudaDeviceProp deviceProp; cudaGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( num_samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_trajectory_fixed_angle_2d: #samples/profile is not a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<T> >(); } unsigned int number_of_samples_per_frame = num_samples_per_profile * num_profiles_per_frame; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples_per_frame ); dims.push_back( num_frames ); boost::shared_ptr< cuNDArray<T> > co( new cuNDArray<T>(&dims) ); // Set dimensions of grid/blocks. 
dim3 dimBlock( num_samples_per_profile ); dim3 dimGrid( num_profiles_per_frame, num_frames ); // Invoke kernel compute_radial_trajectory_fixed_angle_2d_kernel<REAL><<< dimGrid, dimBlock >>> ( co->get_data_ptr(), REAL(1)/(REAL)num_profiles_per_frame, REAL(1)/(REAL)num_frames, angular_offset ); CHECK_FOR_CUDA_ERROR(); return co; } // Find the (eight) neighbors to a given radial sample index template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> __inline__ __device__ typename reald<REAL,2>::Type compute_radial_neighbors( REAL sample_idx_on_profile, REAL angular_offset, REAL alpha, REAL one_over_radial_oversampling_factor, REAL one_over_num_profiles, REAL bias, REAL samples_per_profile, REAL profile, REAL num_profiles, typename reald<REAL,2>::Type * __restrict__ p1, typename reald<REAL,2>::Type * __restrict__ p2, typename reald<REAL,2>::Type * __restrict__ p3, typename reald<REAL,2>::Type * __restrict__ p4, typename reald<REAL,2>::Type * __restrict__ p5, typename reald<REAL,2>::Type * __restrict__ p6, typename reald<REAL,2>::Type * __restrict__ p7, typename reald<REAL,2>::Type * __restrict__ p8 ) { // The sample positions (scales) can be either of the _local_ indices 'sample_idx_on_profile' or 'samples_per_projection'-'sample_idx_on_profile' // Beware of "skewness" around the origin, i.e. +1 sample one one side const REAL ctr_scale = alpha*((sample_idx_on_profile-bias)*one_over_radial_oversampling_factor); const REAL ctr_scale_inv = alpha*((samples_per_profile-sample_idx_on_profile-bias)*one_over_radial_oversampling_factor); const REAL prev_scale = alpha*((sample_idx_on_profile-bias-1)*one_over_radial_oversampling_factor); const REAL prev_scale_inv = alpha*((samples_per_profile-(sample_idx_on_profile-1)-bias)*one_over_radial_oversampling_factor); const REAL next_scale = alpha*((sample_idx_on_profile-bias+1)*one_over_radial_oversampling_factor); const REAL next_scale_inv = alpha*((samples_per_profile-(sample_idx_on_profile+1)-bias)*one_over_radial_oversampling_factor); // Unit circle position for current projection REAL cos_angle, sin_angle; switch(GR){ case true: // golden ratio { const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); gad_sincos<REAL>( (profile+angular_offset)*angle_step, &sin_angle, &cos_angle ); } break; case false: // fixed angle { gad_sincos<REAL>( profile*one_over_num_profiles*get_pi<REAL>(), &sin_angle, &cos_angle ); } break; } // Find the normal to the current projection direction typename reald<REAL,2>::Type normal; normal.vec[0] = -sin_angle; normal.vec[1] = cos_angle; // The position of the idx itself typename reald<REAL,2>::Type sample_pos; sample_pos.vec[0] = ctr_scale*cos_angle; sample_pos.vec[1] = ctr_scale*sin_angle; // The positions of the previous and next sample (*p1).vec[0] = prev_scale*cos_angle; (*p1).vec[1] = prev_scale*sin_angle; (*p2).vec[0] = next_scale*cos_angle; (*p2).vec[1] = next_scale*sin_angle; // Initialize remaining points; (*p3).vec[0] = (*p4).vec[0] = (*p5).vec[0] = (*p6).vec[0] = (*p7).vec[0] = (*p8).vec[0] = (*p3).vec[1] = (*p4).vec[1] = (*p5).vec[1] = (*p6).vec[1] = (*p7).vec[1] = (*p8).vec[1] = get_max<REAL>(); // far away... 
// Run through all projections to find the closests neighbors for( unsigned int i=0; i<num_profiles; i++ ){ if( i == profile ) continue; // Unit circle position projection 'i' switch(GR) { case true: { const REAL angle_step = get_angle_step_GR<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE>(); gad_sincos<REAL>( ((REAL)i+angular_offset)*angle_step, &sin_angle, &cos_angle ); } break; case false: { gad_sincos<REAL>( (REAL)i*one_over_num_profiles*get_pi<REAL>(), &sin_angle, &cos_angle ); } break; } // Determine sample positions on projection typename reald<REAL,2>::Type prev_pos_1; prev_pos_1.vec[0] = prev_scale*cos_angle; prev_pos_1.vec[1] = prev_scale*sin_angle; typename reald<REAL,2>::Type prev_pos_2; prev_pos_2.vec[0] = prev_scale_inv*cos_angle; prev_pos_2.vec[1] = prev_scale_inv*sin_angle; typename reald<REAL,2>::Type ctr_pos_1; ctr_pos_1.vec[0] = ctr_scale*cos_angle; ctr_pos_1.vec[1] = ctr_scale*sin_angle; typename reald<REAL,2>::Type ctr_pos_2; ctr_pos_2.vec[0] = ctr_scale_inv*cos_angle; ctr_pos_2.vec[1] = ctr_scale_inv*sin_angle; typename reald<REAL,2>::Type next_pos_1; next_pos_1.vec[0] = next_scale*cos_angle; next_pos_1.vec[1] = next_scale*sin_angle; typename reald<REAL,2>::Type next_pos_2; next_pos_2.vec[0] = next_scale_inv*cos_angle; next_pos_2.vec[1] = next_scale_inv*sin_angle; // The dot product is used to ensure we find a neighbor on each side if( dot<REAL,2>(ctr_pos_1-sample_pos, normal) > REAL(0) ){ if( norm_squared<REAL>(ctr_pos_1-sample_pos) < norm_squared<REAL>(*p4-sample_pos) ){ *p3 = prev_pos_1; *p4 = ctr_pos_1; *p5 = next_pos_1; } } else{ if( norm_squared<REAL>(ctr_pos_1-sample_pos) < norm_squared<REAL>(*p7-sample_pos) ){ *p6 = prev_pos_1; *p7 = ctr_pos_1; *p8 = next_pos_1; } } // The dot product is used to ensure we find a neighbor on each side if( dot<REAL,2>(ctr_pos_2-sample_pos, normal) > REAL(0) ){ if( norm_squared<REAL>(ctr_pos_2-sample_pos) < norm_squared<REAL>(*p4-sample_pos) ){ *p3 = prev_pos_2; *p4 = ctr_pos_2; *p5 = next_pos_2; } } else{ if( norm_squared<REAL>(ctr_pos_2-sample_pos) < norm_squared<REAL>(*p7-sample_pos) ){ *p6 = prev_pos_2; *p7 = ctr_pos_2; *p8 = next_pos_2; } } } return sample_pos; } template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> __global__ void compute_radial_dcw_2d_kernel( REAL alpha, REAL one_over_radial_oversampling_factor, REAL one_over_num_profiles, REAL angular_offset, REAL *dcw ) { const REAL samples_per_profile = (REAL) (blockDim.x<<1); const REAL sample_idx_on_profile = (REAL)(blockIdx.x*blockDim.x+threadIdx.x); const REAL num_profiles = (REAL)gridDim.y; const REAL profile = (REAL)blockIdx.y; const REAL bias = samples_per_profile*REAL(0.5); const unsigned int index = blockIdx.y*samples_per_profile + sample_idx_on_profile; REAL weight; if( sample_idx_on_profile == blockDim.x ){ // Special case - center of profile/k-space const REAL radius = (alpha*one_over_radial_oversampling_factor)*REAL(0.5); const REAL area = radius*radius*get_pi<REAL>(); weight = area/num_profiles; } else{ // General case - all neighbors exist // Compute sample positions for the current sample and all neighbors // The ordering of p1..p8 in the call below follows the edge of the "Voronoi polygon" typename reald<REAL,2>::Type sample_pos; typename reald<REAL,2>::Type p1, p2, p3, p4, p5, p6, p7, p8; sample_pos = compute_radial_neighbors<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE,GR> ( sample_idx_on_profile, angular_offset, alpha, one_over_radial_oversampling_factor, one_over_num_profiles, bias, samples_per_profile, profile, num_profiles, &p1, &p5, &p2, &p3, 
&p4, &p8, &p7, &p6 ); // Find midpoints of lines from sample_pos to all other points. p1 = REAL(0.5)*(sample_pos+p1); // computing "sample_pos+(p1-sample_pos)/2" p2 = REAL(0.5)*(sample_pos+p2); p3 = REAL(0.5)*(sample_pos+p3); p4 = REAL(0.5)*(sample_pos+p4); p5 = REAL(0.5)*(sample_pos+p5); p6 = REAL(0.5)*(sample_pos+p6); p7 = REAL(0.5)*(sample_pos+p7); p8 = REAL(0.5)*(sample_pos+p8); // The weight is determined by the area of the polygon (http://local.wasp.uwa.edu.au/~pbourke/geometry/polyarea/) weight = REAL(0.5)* ((p1.vec[0]*p2.vec[1]-p2.vec[0]*p1.vec[1])+ (p2.vec[0]*p3.vec[1]-p3.vec[0]*p2.vec[1])+ (p3.vec[0]*p4.vec[1]-p4.vec[0]*p3.vec[1])+ (p4.vec[0]*p5.vec[1]-p5.vec[0]*p4.vec[1])+ (p5.vec[0]*p6.vec[1]-p6.vec[0]*p5.vec[1])+ (p6.vec[0]*p7.vec[1]-p7.vec[0]*p6.vec[1])+ (p7.vec[0]*p8.vec[1]-p8.vec[0]*p7.vec[1])+ (p8.vec[0]*p1.vec[1]-p1.vec[0]*p8.vec[1])); if( weight<REAL(0) ) weight *= -REAL(1); } dcw[index] = weight; } template<class REAL, unsigned int GOLDEN_RATIO_ANGULAR_STEP_SIZE, bool GR> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor, unsigned int profile_offset = 0 ) { if( num_profiles < 4 ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: use at least four profiles" << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } // Get device properties int device; cudaGetDevice( &device ); cudaDeviceProp deviceProp; cudaGetDeviceProperties( &deviceProp, device ); const unsigned int warp_size = deviceProp.warpSize; if( samples_per_profile%2 ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: samples/profile must be even." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } if( samples_per_profile%warp_size ){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: samples/profile number a multiple of the device's warp size." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } unsigned int number_of_samples = samples_per_profile * num_profiles; // Allocate space for result vector<size_t> dims; dims.push_back( number_of_samples ); boost::shared_ptr< cuNDArray<REAL> > dcw( new cuNDArray<REAL>(&dims) ); if(!dcw.get()){ cout << endl << "Error:: compute_radial_dcw_<*>_2d: memory allocation failed." << endl; return boost::shared_ptr< cuNDArray<REAL> >(); } // Set dimensions of grid/blocks. 
(division by two due to resource limitations) dim3 dimBlock( samples_per_profile>>1 ); dim3 dimGrid( 2, num_profiles ); // Invoke kernel compute_radial_dcw_2d_kernel<REAL,GOLDEN_RATIO_ANGULAR_STEP_SIZE,GR><<< dimGrid, dimBlock >>> ( alpha, one_over_radial_oversampling_factor, REAL(1)/(REAL)num_profiles, (REAL)profile_offset, dcw->get_data_ptr() ); CHECK_FOR_CUDA_ERROR(); return dcw; } template<class REAL> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_golden_ratio_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor, unsigned int profile_offset, GOLDEN_RATIO_ANGULAR_STEP_SIZE mode) { if( mode == GR_SMALLEST ) return compute_radial_dcw_2d<REAL,0,true> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor, profile_offset ); else if( mode == GR_ORIGINAL ) return compute_radial_dcw_2d<REAL,1,true> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor, profile_offset ); else throw std::runtime_error("\ncompute_radial_dcw_golden_ratio_2d() :: unexpected mode\n"); } template<class REAL> boost::shared_ptr< cuNDArray<REAL> > compute_radial_dcw_fixed_angle_2d( unsigned int samples_per_profile, unsigned int num_profiles, REAL alpha, REAL one_over_radial_oversampling_factor ) { // The golden ratio template type is ignored when the tailing template argument is false return compute_radial_dcw_2d<REAL,GR_ORIGINAL,false> ( samples_per_profile, num_profiles, alpha, one_over_radial_oversampling_factor ); } // // Instantiation // template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<float,2>::Type > > compute_radial_trajectory_fixed_angle_2d<float>( unsigned int, unsigned int, unsigned int, float ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<double,2>::Type > > compute_radial_trajectory_fixed_angle_2d<double>( unsigned int, unsigned int, unsigned int, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<float,2>::Type > > compute_radial_trajectory_golden_ratio_2d<float>( unsigned int, unsigned int, unsigned int, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray< typename reald<double,2>::Type > > compute_radial_trajectory_golden_ratio_2d<double>( unsigned int, unsigned int, unsigned int, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> >compute_radial_dcw_fixed_angle_2d<float>( unsigned int, unsigned int, float, float); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> >compute_radial_dcw_fixed_angle_2d<double>( unsigned int, unsigned int, double, double ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<float> > compute_radial_dcw_golden_ratio_2d<float>( unsigned int, unsigned int, float, float, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); template EXPORTGPUCORE boost::shared_ptr< cuNDArray<double> > compute_radial_dcw_golden_ratio_2d<double>( unsigned int, unsigned int, double, double, unsigned int, GOLDEN_RATIO_ANGULAR_STEP_SIZE ); }
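/* Illustrative host-side reference (not part of the Gadgetron source above): a minimal sketch of the sample position the golden-ratio trajectory kernel computes, assuming the GR_ORIGINAL step pi/((sqrt(5)+1)/2), the +pi phase applied in the kernel, and samples normalized to roughly [-0.5, 0.5) along each profile. The struct and function names are hypothetical; only the constants and the formula are taken from the kernels above. */
#include <cmath>
struct sample2d_ref { float kx, ky; };
static sample2d_ref golden_ratio_sample_ref( unsigned int sample_idx, unsigned int samples_per_profile,
                                             unsigned int profile, float angular_offset )
{
  const float pi = 3.14159265358979f;
  const float angle_step = pi/((std::sqrt(5.0f)+1.0f)*0.5f);            // GR_ORIGINAL step
  const float angle = ((float)profile+angular_offset)*angle_step+pi;    // same phase convention as the kernel
  const float bias = 0.5f*(float)samples_per_profile;
  const float radius = ((float)sample_idx-bias)/(float)samples_per_profile;
  sample2d_ref s;
  s.kx = radius*std::cos(angle);
  s.ky = radius*std::sin(angle);
  return s;
}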
86fd342edd0e98ada9d5e4456f9e1f4a6304b802.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <omp.h> const int INF = 10000000; const int V = 10010; void input(char *inFileName); void output(char *outFileName); void block_FW(); int ceil(int a, int b); void callP1(int round); void callP2(int r, int *block_start_x, int *block_start_y, int *block_height, int *block_width); __global__ void cal_Pone(int* Dist_ij); __global__ void cal_Ptwo(int* Dist_ij, int* Dist_ik, int* Dist_kj); int n, m, numDevs, B; // Number of vertices, edges static int Dist[V][V]; //int* Dist; int main(int argc, char* argv[]) { numDevs = 0; hipGetDeviceCount(&numDevs); /* for (int d = 0; d < numDevs; d++) hipSetDevice(d); hipSetDevice(0);*/ input(argv[1]); B = atoi(argv[3]); if(B < 1) B = 1; else if(B > 32)// B^2 < max thread == 1024 B = 32; /* hipDeviceProp_t prop; if(hipGetDeviceProperties(&prop, 0) == hipSuccess) { printf("cuda version = %d \n" , prop.major ) ; printf("maxThreadsPerBlock = %d \n" , prop.maxThreadsPerBlock ) ; printf("totalGlobalMem = %d \n" , prop.totalGlobalMem ) ; printf(" maxThreadsDim[3] = %d, %d, %d\n" , prop.maxThreadsDim[1], prop.maxThreadsDim[2] , prop.maxThreadsDim[3] ) ; printf(" maxGridSize[3] = %d, %d, %d\n" , prop.maxGridSize[1] , prop.maxGridSize[2] , prop.maxGridSize[3] ) ; } //cuda version: 2 //maxThreadsPerBlock: 1024 //totalGlobalMem: 2066153472 //maxThreadsDim: 1024, 64, 65535 //maxGridSize: 65535, 65535, 1301000 //*/ block_FW(); output(argv[2]); return 0; } void input(char *inFileName) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &n, &m); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) Dist[i][j] = 0; else Dist[i][j] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a][b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i][j] >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", Dist[i][j]); } fprintf(outfile, "\n"); } } int ceil(int a, int b) { return (a + b -1)/b; } void block_FW() { int round = ceil(n, B); for (int r = 0; r < round; ++r) { /* Phase 1*/ callP1(r); /* Phase 2*/ //printf("This is Phase 2\n"); int block_start_x[4] = {r, r, 0, r + 1}; int block_start_y[4] = {0, r + 1, r, r}; int block_height[4] = {1, 1, r, round - r - 1}; int block_width[4] = {r, round - r - 1, 1, 1}; callP2(r, block_start_x, block_start_y, block_height, block_width); /* hase 3*/ //printf("This is Phase 3\n"); int block_start_x2[4] = {0, 0, r + 1, r + 1}; int block_start_y2[4] = {0, r + 1, 0, r + 1}; int block_height2[4] = {r, r, round - r -1, round - r -1}; int block_width2[4] = {r, round - r -1, r, round - r -1}; callP2(r, block_start_x2, block_start_y2, block_height2, block_width2); //printf("End one turn\n"); } } void putInAij(int bias_y, int* Dist_ij){ int i, j; bias_y *= B; //?!! 
//IOIO //??""?(IOVSIO) int jlen = B; int itmp ; int jtmp ; // b+j > n if(bias_y + jlen > n) jlen = n - bias_y; // part 1: <, < for(i = 0; i < jlen; ++i){ itmp = bias_y + i; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_ij[i * B + j] = Dist[itmp][jtmp]; } } itmp = (jlen -1) * B; // part 3: >, <= for(i = jlen; i < B; ++i) for(j = 0; j < jlen; ++j) Dist_ij[i * B + j] = Dist_ij[itmp + j]; // part 2: >, > jtmp += jlen -1;//(jlen -1) *B + (jlen -1); for(i = jlen; i < B; ++i) for(j = jlen; j < B; ++j) Dist_ij[i * B + j] = Dist_ij[jtmp]; // part 4: <=, > jtmp = (jlen -1); for(i = 0; i < jlen; ++i){ itmp = i *B; for(j = jlen; j < B; ++j){ Dist_ij[itmp + j] = Dist_ij[itmp + jtmp]; } } } void putDistInArray_new(int round, int bias_x, int bias_y, int block_height, int block_width, int* Dist_ij2, int* Dist_ik2, int* Dist_kj2){ int itmp, jtmp, ktmp, i2, j, k; int kbias = round * B; int end_x = block_height * B; int end_y = block_width * B; bias_x *= B; bias_y *= B; //?!! //IOIO //??""?(IOVSIO) int ilen = end_x; int jlen = end_y; int klen = B; // b+i2 > n if(bias_x + ilen > n) ilen = n - bias_x; // b+j > n if(bias_y + jlen > n) jlen = n - bias_y; // b+j > n if(kbias + klen > n) klen = n - kbias; // part 1: <, <, < for(i2 = 0; i2 < ilen; ++i2){ itmp = bias_x + i2; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_ij2[i2 * end_y + j] = Dist[itmp][jtmp]; } } // part 5: <, <, < for(k = 0; k < klen; ++k){ ktmp = k + kbias; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_kj2[k * end_y + j] = Dist[ktmp][jtmp]; } for(i2 = 0; i2 < ilen; ++i2){ itmp = bias_x + i2; Dist_ik2[i2 * B + k] = Dist[itmp][ktmp]; } } // part 5-2: <, X, > || X, <, > ktmp = klen - 1; jtmp = ktmp * end_y; for(k = klen; k < B; ++k){ for(i2 = 0; i2 < ilen; ++i2){ itmp = i2 * B; Dist_ik2[itmp+ k] = Dist_ik2[itmp + ktmp]; } for(j = 0; j < jlen; ++j) Dist_kj2[k * end_y + j] = Dist_kj2[jtmp + j]; } // part 3: >, <, X itmp = (ilen -1) *end_y; for(i2 = ilen; i2 < end_x; ++i2) for(j = 0; j < jlen; ++j) Dist_ij2[i2 * end_y + j] = Dist_ij2[itmp + j]; // part 2: >, >, X itmp += jlen -1;//(ilen -1) * end_y + (jlen -1); for(i2 = ilen; i2 < end_x; ++i2) for(j = jlen; j < end_y; ++j) Dist_ij2[i2 * end_y + j] = Dist_ij2[itmp]; // part 5-3: >, X, > || X, >, > itmp = (ilen -1) * B + (klen -1); jtmp += jlen -1;//(klen -1) * end_y + (jlen -1); for(k = klen; k < B; ++k){ for(i2 = ilen; i2 < end_x; ++i2) Dist_ik2[i2 * B + k] = Dist_ik2[itmp]; for(j = jlen; j < end_y; ++j) Dist_kj2[k * end_y + j] = Dist_kj2[jtmp]; } // part 5-4: >, X, < || X, <, < itmp -= klen -1;//(ilen -1) * B ; jtmp = jlen -1; for(k = 0; k < klen; ++k){ ktmp = k * end_y; for(i2 = ilen; i2 < end_x; ++i2) Dist_ik2[i2 * B + k] = Dist_ik2[itmp + k]; for(j = jlen; j < end_y; ++j) Dist_kj2[ktmp + j] = Dist_kj2[ktmp + jtmp]; } // part 4: <, >, X for(i2 = 0; i2 < ilen; ++i2){ itmp = i2 * end_y; for(j = jlen; j < end_y; ++j) Dist_ij2[itmp + j] = Dist_ij2[itmp + jtmp]; } } void putToDist(int round, int bias_x, int bias_y, int block_height, int block_width, int* Dist_ij){ int itmp, jtmp; int end_x = block_height * B; int end_y = block_width * B; int ilen = end_x; int jlen = end_y; bias_x *= B; bias_y *= B; if(ilen + bias_x > n) ilen = n - bias_x; if(jlen + bias_y > n) jlen = n - bias_y; for(int i = 0; i < ilen; ++i){ itmp = bias_x + i; if(itmp >= n) break; for(int j = 0; j < jlen; ++j){ jtmp = bias_y + j; if(jtmp >= n) break; Dist[itmp][jtmp] = Dist_ij[i * end_y + j]; } } } void callP1(int i){ int shm_size = sizeof(int) * B * B; dim3 blocksPerGrif1(1, 1); dim3 threadsPerBlock(B, B); //int itmp, 
jtmp, bias_y, i2, j; hipError_t err ; hipSetDevice(i%2); int *Dist_ij; err = hipHostMalloc(&Dist_ij, shm_size ); if (err != 0) printf("malloc Dist_ij error\n"); putInAij( i, Dist_ij); int *Dist_ijg; //step 1: declare hipMalloc((void **)&Dist_ijg, shm_size); //step 2: copy hipMemcpy(Dist_ijg, Dist_ij, shm_size, hipMemcpyHostToDevice); hipLaunchKernelGGL(( cal_Pone), dim3(blocksPerGrif1) , dim3(threadsPerBlock) , shm_size, 0, Dist_ijg); //step 3: get return hipMemcpy(Dist_ij, Dist_ijg, shm_size, hipMemcpyDeviceToHost); putToDist(i, i, i, 1, 1, Dist_ij); //step 4: free gpu hipFree(Dist_ijg); hipHostFree(Dist_ij); } void callP2(int r, int *block_start_x, int *block_start_y, int *block_height, int *block_width){ int shm_size = sizeof(int) * B * B; dim3 threadsPerBlock(B, B); const int str_num = 4; hipStream_t stream[str_num]; int *Dist_all[str_num *6];// pointer array //int kbias = r * B; int i; int thread_id[str_num]; #pragma omp parallel shared( shm_size, threadsPerBlock, stream, Dist_all, i, thread_id) num_threads(numDevs) { //int ktmp, itmp, jtmp, i2, j, k, bias_x, bias_y, end_x, end_y; hipError_t err; #pragma omp for schedule(dynamic) for(i = 0; i < str_num; ++i){ if( block_height[i] == 0 || block_width[i] == 0) continue; thread_id[i] = omp_get_thread_num(); hipSetDevice(thread_id[i]); hipStreamCreate(&stream[i]); dim3 blocksPerGrif1( block_height[i], block_width[i]); int *Dist_ij2, *Dist_ik2, *Dist_kj2; err = hipHostMalloc(&Dist_ij2, shm_size * block_height[i] * block_width[i]); if (err != 0) printf("malloc Dist_ij2 error\n"); err = hipHostMalloc(&Dist_ik2, shm_size * block_height[i]); if (err != 0) printf("malloc Dist_ik2 error\n"); err = hipHostMalloc(&Dist_kj2, shm_size * block_width[i]); if (err != 0) printf("malloc Dist_kj2 error\n"); putDistInArray_new(r, block_start_x[i], block_start_y[i], block_height[i], block_width[i], Dist_ij2, Dist_ik2, Dist_kj2); /* end_x = block_height[i] * B; end_y = block_width[i] * B; bias_x = block_start_x[i] *B; bias_y = block_start_y[i] *B; //?!! 
//IOIO //??""?(IOVSIO) for(i2 = 0; i2 < end_x; ++i2){ itmp = bias_x + i2; if(itmp >= n) itmp = n -1; for(j = 0; j < end_y; ++j){ jtmp = bias_y + j; if(jtmp >= n) jtmp = n -1; Dist_ij2[i2 * end_y + j] = Dist[itmp][jtmp]; } for(k = 0; k < B; ++k){ ktmp = k + kbias; if(ktmp >= n) ktmp = n -1; Dist_ik2[i2 * B + k] = Dist[itmp][ktmp]; } } for(int k = 0; k < B; ++k){ ktmp = k + kbias; if(ktmp >= n) ktmp = n -1; for(int j = 0; j < end_y; ++j){ jtmp = bias_y + j; if(jtmp >= n) jtmp = n -1; Dist_kj2[k * end_y + j] = Dist[ktmp][jtmp]; } }*/ Dist_all[i *6] = Dist_ij2; Dist_all[i *6 +1] = Dist_ik2; Dist_all[i *6 +2] = Dist_kj2; int *Dist_ijg2, *Dist_ikg2, *Dist_kjg2; //step 1: declare hipMalloc((void **)&Dist_ijg2, shm_size * block_height[i] * block_width[i]); hipMalloc((void **)&Dist_ikg2, shm_size * block_height[i]); hipMalloc((void **)&Dist_kjg2, shm_size * block_width[i]); Dist_all[i *6 +3] = Dist_ijg2; Dist_all[i *6 +4] = Dist_ikg2; Dist_all[i *6 +5] = Dist_kjg2; //step 2: copy hipMemcpyAsync(Dist_ijg2, Dist_ij2, shm_size * block_height[i] * block_width[i], hipMemcpyHostToDevice, stream[i]); hipMemcpyAsync(Dist_ikg2, Dist_ik2, shm_size * block_height[i], hipMemcpyHostToDevice, stream[i]); hipMemcpyAsync(Dist_kjg2, Dist_kj2, shm_size * block_width[i], hipMemcpyHostToDevice, stream[i]); hipLaunchKernelGGL(( cal_Ptwo), dim3(blocksPerGrif1) , dim3(threadsPerBlock) , shm_size, stream[i], Dist_ijg2, Dist_ikg2, Dist_kjg2); //step 3: get return hipMemcpyAsync(Dist_ij2, Dist_ijg2, shm_size * block_height[i] * block_width[i], hipMemcpyDeviceToHost, stream[i]); } } //wait for stream // private( j, tt2 ) #pragma omp parallel for shared(block_start_x, block_start_y, block_height, block_width, stream, Dist_all, i, thread_id) num_threads(numDevs) schedule(dynamic) for(i = 0; i < str_num; i++){ if( block_height[i] == 0 || block_width[i] == 0) continue; hipSetDevice(thread_id[i]); hipStreamSynchronize(stream[i]); putToDist(r, block_start_x[i], block_start_y[i], block_height[i], block_width[i], Dist_all[i *6]); //step 4: free gpu hipFree(Dist_all[i *6 +3]); hipFree(Dist_all[i *6 +4]); hipFree(Dist_all[i *6 +5]); hipHostFree(Dist_all[i *6]); hipHostFree(Dist_all[i *6 +1]); hipHostFree(Dist_all[i *6 +2]); hipStreamDestroy(stream[i]); } } __global__ void cal_Pone(int* Dist_ij) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ int DS[]; int dsbias = threadIdx.x * blockDim.y + threadIdx.y; int offset_j = gridDim.y * blockDim.y; DS[dsbias] = Dist_ij[i * offset_j + j];//j range = blocksPerG.y __syncthreads(); for (int k = 0; k < blockDim.x ; ++k) {//k range= B if (DS[i * blockDim.x + k] + DS[k * offset_j + j] < DS[dsbias]) DS[dsbias] = DS[i * blockDim.x + k] + DS[k * offset_j + j]; __syncthreads(); } Dist_ij[i * offset_j + j] = DS[dsbias];// save value from shared memory __syncthreads(); } __global__ void cal_Ptwo(int* Dist_ij, int* Dist_ik, int* Dist_kj) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ int DS[]; int dsbias = threadIdx.x * blockDim.y + threadIdx.y; int offset_j = gridDim.y * blockDim.y; DS[dsbias] = Dist_ij[i * offset_j + j];//j range = blocksPerG.y __syncthreads(); for (int k = 0; k < blockDim.x ; ++k) {//k range= B if (Dist_ik[i * blockDim.x + k] + Dist_kj[k * offset_j + j] < DS[dsbias]) DS[dsbias] = Dist_ik[i * blockDim.x + k] + Dist_kj[k * offset_j + j]; } Dist_ij[i * offset_j + j] = DS[dsbias];// save value to shared memory __syncthreads(); } /* void cal(int B, int Round, 
int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int block_internal_start_x = b_i * B; int block_internal_end_x = (b_i +1) * B; int block_internal_start_y = b_j * B; int block_internal_end_y = (b_j +1) * B; if (block_internal_end_x > n) block_internal_end_x = n; if (block_internal_end_y > n) block_internal_end_y = n; for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for (int j = block_internal_start_y; j < block_internal_end_y; ++j) { if (Dist[i * n + k] + Dist[k * n + j] < Dist[i * n + j]) Dist[i * n + j] = Dist[i * n + k] + Dist[k * n + j]; } } } } } } */
86fd342edd0e98ada9d5e4456f9e1f4a6304b802.cu
#include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <omp.h> const int INF = 10000000; const int V = 10010; void input(char *inFileName); void output(char *outFileName); void block_FW(); int ceil(int a, int b); void callP1(int round); void callP2(int r, int *block_start_x, int *block_start_y, int *block_height, int *block_width); __global__ void cal_Pone(int* Dist_ij); __global__ void cal_Ptwo(int* Dist_ij, int* Dist_ik, int* Dist_kj); int n, m, numDevs, B; // Number of vertices, edges static int Dist[V][V]; //int* Dist; int main(int argc, char* argv[]) { numDevs = 0; cudaGetDeviceCount(&numDevs); /* for (int d = 0; d < numDevs; d++) cudaSetDevice(d); cudaSetDevice(0);*/ input(argv[1]); B = atoi(argv[3]); if(B < 1) B = 1; else if(B > 32)// B^2 < max thread == 1024 B = 32; /* cudaDeviceProp prop; if(cudaGetDeviceProperties(&prop, 0) == cudaSuccess) { printf("cuda version = %d \n" , prop.major ) ; printf("maxThreadsPerBlock = %d \n" , prop.maxThreadsPerBlock ) ; printf("totalGlobalMem = %d \n" , prop.totalGlobalMem ) ; printf(" maxThreadsDim[3] = %d, %d, %d\n" , prop.maxThreadsDim[1], prop.maxThreadsDim[2] , prop.maxThreadsDim[3] ) ; printf(" maxGridSize[3] = %d, %d, %d\n" , prop.maxGridSize[1] , prop.maxGridSize[2] , prop.maxGridSize[3] ) ; } //cuda version: 2 //maxThreadsPerBlock: 1024 //totalGlobalMem: 2066153472 //maxThreadsDim: 1024, 64, 65535 //maxGridSize: 65535, 65535, 1301000 //*/ block_FW(); output(argv[2]); return 0; } void input(char *inFileName) { FILE *infile = fopen(inFileName, "r"); fscanf(infile, "%d %d", &n, &m); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (i == j) Dist[i][j] = 0; else Dist[i][j] = INF; } } while (--m >= 0) { int a, b, v; fscanf(infile, "%d %d %d", &a, &b, &v); --a, --b; Dist[a][b] = v; } } void output(char *outFileName) { FILE *outfile = fopen(outFileName, "w"); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { if (Dist[i][j] >= INF) fprintf(outfile, "INF "); else fprintf(outfile, "%d ", Dist[i][j]); } fprintf(outfile, "\n"); } } int ceil(int a, int b) { return (a + b -1)/b; } void block_FW() { int round = ceil(n, B); for (int r = 0; r < round; ++r) { /* Phase 1*/ callP1(r); /* Phase 2*/ //printf("This is Phase 2\n"); int block_start_x[4] = {r, r, 0, r + 1}; int block_start_y[4] = {0, r + 1, r, r}; int block_height[4] = {1, 1, r, round - r - 1}; int block_width[4] = {r, round - r - 1, 1, 1}; callP2(r, block_start_x, block_start_y, block_height, block_width); /* hase 3*/ //printf("This is Phase 3\n"); int block_start_x2[4] = {0, 0, r + 1, r + 1}; int block_start_y2[4] = {0, r + 1, 0, r + 1}; int block_height2[4] = {r, r, round - r -1, round - r -1}; int block_width2[4] = {r, round - r -1, r, round - r -1}; callP2(r, block_start_x2, block_start_y2, block_height2, block_width2); //printf("End one turn\n"); } } void putInAij(int bias_y, int* Dist_ij){ int i, j; bias_y *= B; //為何要重新歷過一遍?比原本的還慢!! 
//因要解決IO問題,不想直接IO全部 //真的有比較差??不是被"資源利用率"騙到?(IO全部VS處理完再IO部分) int jlen = B; int itmp ; int jtmp ; // b+j > n if(bias_y + jlen > n) jlen = n - bias_y; // part 1: <, < for(i = 0; i < jlen; ++i){ itmp = bias_y + i; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_ij[i * B + j] = Dist[itmp][jtmp]; } } itmp = (jlen -1) * B; // part 3: >, <= for(i = jlen; i < B; ++i) for(j = 0; j < jlen; ++j) Dist_ij[i * B + j] = Dist_ij[itmp + j]; // part 2: >, > jtmp += jlen -1;//(jlen -1) *B + (jlen -1); for(i = jlen; i < B; ++i) for(j = jlen; j < B; ++j) Dist_ij[i * B + j] = Dist_ij[jtmp]; // part 4: <=, > jtmp = (jlen -1); for(i = 0; i < jlen; ++i){ itmp = i *B; for(j = jlen; j < B; ++j){ Dist_ij[itmp + j] = Dist_ij[itmp + jtmp]; } } } void putDistInArray_new(int round, int bias_x, int bias_y, int block_height, int block_width, int* Dist_ij2, int* Dist_ik2, int* Dist_kj2){ int itmp, jtmp, ktmp, i2, j, k; int kbias = round * B; int end_x = block_height * B; int end_y = block_width * B; bias_x *= B; bias_y *= B; //為何要重新歷過一遍?比原本的還慢!! //因要解決IO問題,不想直接IO全部 //真的有比較差??不是被"資源利用率"騙到?(IO全部VS處理完再IO部分) int ilen = end_x; int jlen = end_y; int klen = B; // b+i2 > n if(bias_x + ilen > n) ilen = n - bias_x; // b+j > n if(bias_y + jlen > n) jlen = n - bias_y; // b+j > n if(kbias + klen > n) klen = n - kbias; // part 1: <, <, < for(i2 = 0; i2 < ilen; ++i2){ itmp = bias_x + i2; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_ij2[i2 * end_y + j] = Dist[itmp][jtmp]; } } // part 5: <, <, < for(k = 0; k < klen; ++k){ ktmp = k + kbias; for(j = 0; j < jlen; ++j){ jtmp = bias_y + j; Dist_kj2[k * end_y + j] = Dist[ktmp][jtmp]; } for(i2 = 0; i2 < ilen; ++i2){ itmp = bias_x + i2; Dist_ik2[i2 * B + k] = Dist[itmp][ktmp]; } } // part 5-2: <, X, > || X, <, > ktmp = klen - 1; jtmp = ktmp * end_y; for(k = klen; k < B; ++k){ for(i2 = 0; i2 < ilen; ++i2){ itmp = i2 * B; Dist_ik2[itmp+ k] = Dist_ik2[itmp + ktmp]; } for(j = 0; j < jlen; ++j) Dist_kj2[k * end_y + j] = Dist_kj2[jtmp + j]; } // part 3: >, <, X itmp = (ilen -1) *end_y; for(i2 = ilen; i2 < end_x; ++i2) for(j = 0; j < jlen; ++j) Dist_ij2[i2 * end_y + j] = Dist_ij2[itmp + j]; // part 2: >, >, X itmp += jlen -1;//(ilen -1) * end_y + (jlen -1); for(i2 = ilen; i2 < end_x; ++i2) for(j = jlen; j < end_y; ++j) Dist_ij2[i2 * end_y + j] = Dist_ij2[itmp]; // part 5-3: >, X, > || X, >, > itmp = (ilen -1) * B + (klen -1); jtmp += jlen -1;//(klen -1) * end_y + (jlen -1); for(k = klen; k < B; ++k){ for(i2 = ilen; i2 < end_x; ++i2) Dist_ik2[i2 * B + k] = Dist_ik2[itmp]; for(j = jlen; j < end_y; ++j) Dist_kj2[k * end_y + j] = Dist_kj2[jtmp]; } // part 5-4: >, X, < || X, <, < itmp -= klen -1;//(ilen -1) * B ; jtmp = jlen -1; for(k = 0; k < klen; ++k){ ktmp = k * end_y; for(i2 = ilen; i2 < end_x; ++i2) Dist_ik2[i2 * B + k] = Dist_ik2[itmp + k]; for(j = jlen; j < end_y; ++j) Dist_kj2[ktmp + j] = Dist_kj2[ktmp + jtmp]; } // part 4: <, >, X for(i2 = 0; i2 < ilen; ++i2){ itmp = i2 * end_y; for(j = jlen; j < end_y; ++j) Dist_ij2[itmp + j] = Dist_ij2[itmp + jtmp]; } } void putToDist(int round, int bias_x, int bias_y, int block_height, int block_width, int* Dist_ij){ int itmp, jtmp; int end_x = block_height * B; int end_y = block_width * B; int ilen = end_x; int jlen = end_y; bias_x *= B; bias_y *= B; if(ilen + bias_x > n) ilen = n - bias_x; if(jlen + bias_y > n) jlen = n - bias_y; for(int i = 0; i < ilen; ++i){ itmp = bias_x + i; if(itmp >= n) break; for(int j = 0; j < jlen; ++j){ jtmp = bias_y + j; if(jtmp >= n) break; Dist[itmp][jtmp] = Dist_ij[i * end_y + j]; } } } void callP1(int i){ int shm_size = 
sizeof(int) * B * B; dim3 blocksPerGrif1(1, 1); dim3 threadsPerBlock(B, B); //int itmp, jtmp, bias_y, i2, j; cudaError_t err ; cudaSetDevice(i%2); int *Dist_ij; err = cudaMallocHost(&Dist_ij, shm_size ); if (err != 0) printf("malloc Dist_ij error\n"); putInAij( i, Dist_ij); int *Dist_ijg; //step 1: declare cudaMalloc((void **)&Dist_ijg, shm_size); //step 2: copy cudaMemcpy(Dist_ijg, Dist_ij, shm_size, cudaMemcpyHostToDevice); cal_Pone<<< blocksPerGrif1 , threadsPerBlock , shm_size>>> (Dist_ijg); //step 3: get return cudaMemcpy(Dist_ij, Dist_ijg, shm_size, cudaMemcpyDeviceToHost); putToDist(i, i, i, 1, 1, Dist_ij); //step 4: free gpu cudaFree(Dist_ijg); cudaFreeHost(Dist_ij); } void callP2(int r, int *block_start_x, int *block_start_y, int *block_height, int *block_width){ int shm_size = sizeof(int) * B * B; dim3 threadsPerBlock(B, B); const int str_num = 4; cudaStream_t stream[str_num]; int *Dist_all[str_num *6];// pointer array //int kbias = r * B; int i; int thread_id[str_num]; #pragma omp parallel shared( shm_size, threadsPerBlock, stream, Dist_all, i, thread_id) num_threads(numDevs) { //int ktmp, itmp, jtmp, i2, j, k, bias_x, bias_y, end_x, end_y; cudaError_t err; #pragma omp for schedule(dynamic) for(i = 0; i < str_num; ++i){ if( block_height[i] == 0 || block_width[i] == 0) continue; thread_id[i] = omp_get_thread_num(); cudaSetDevice(thread_id[i]); cudaStreamCreate(&stream[i]); dim3 blocksPerGrif1( block_height[i], block_width[i]); int *Dist_ij2, *Dist_ik2, *Dist_kj2; err = cudaMallocHost(&Dist_ij2, shm_size * block_height[i] * block_width[i]); if (err != 0) printf("malloc Dist_ij2 error\n"); err = cudaMallocHost(&Dist_ik2, shm_size * block_height[i]); if (err != 0) printf("malloc Dist_ik2 error\n"); err = cudaMallocHost(&Dist_kj2, shm_size * block_width[i]); if (err != 0) printf("malloc Dist_kj2 error\n"); putDistInArray_new(r, block_start_x[i], block_start_y[i], block_height[i], block_width[i], Dist_ij2, Dist_ik2, Dist_kj2); /* end_x = block_height[i] * B; end_y = block_width[i] * B; bias_x = block_start_x[i] *B; bias_y = block_start_y[i] *B; //為何要重新歷過一遍?比原本的還慢!! 
//因要解決IO問題,不想直接IO全部 //真的有比較差??不是被"資源利用率"騙到?(IO全部VS處理完再IO部分) for(i2 = 0; i2 < end_x; ++i2){ itmp = bias_x + i2; if(itmp >= n) itmp = n -1; for(j = 0; j < end_y; ++j){ jtmp = bias_y + j; if(jtmp >= n) jtmp = n -1; Dist_ij2[i2 * end_y + j] = Dist[itmp][jtmp]; } for(k = 0; k < B; ++k){ ktmp = k + kbias; if(ktmp >= n) ktmp = n -1; Dist_ik2[i2 * B + k] = Dist[itmp][ktmp]; } } for(int k = 0; k < B; ++k){ ktmp = k + kbias; if(ktmp >= n) ktmp = n -1; for(int j = 0; j < end_y; ++j){ jtmp = bias_y + j; if(jtmp >= n) jtmp = n -1; Dist_kj2[k * end_y + j] = Dist[ktmp][jtmp]; } }*/ Dist_all[i *6] = Dist_ij2; Dist_all[i *6 +1] = Dist_ik2; Dist_all[i *6 +2] = Dist_kj2; int *Dist_ijg2, *Dist_ikg2, *Dist_kjg2; //step 1: declare cudaMalloc((void **)&Dist_ijg2, shm_size * block_height[i] * block_width[i]); cudaMalloc((void **)&Dist_ikg2, shm_size * block_height[i]); cudaMalloc((void **)&Dist_kjg2, shm_size * block_width[i]); Dist_all[i *6 +3] = Dist_ijg2; Dist_all[i *6 +4] = Dist_ikg2; Dist_all[i *6 +5] = Dist_kjg2; //step 2: copy cudaMemcpyAsync(Dist_ijg2, Dist_ij2, shm_size * block_height[i] * block_width[i], cudaMemcpyHostToDevice, stream[i]); cudaMemcpyAsync(Dist_ikg2, Dist_ik2, shm_size * block_height[i], cudaMemcpyHostToDevice, stream[i]); cudaMemcpyAsync(Dist_kjg2, Dist_kj2, shm_size * block_width[i], cudaMemcpyHostToDevice, stream[i]); cal_Ptwo<<< blocksPerGrif1 , threadsPerBlock , shm_size, stream[i]>>> (Dist_ijg2, Dist_ikg2, Dist_kjg2); //step 3: get return cudaMemcpyAsync(Dist_ij2, Dist_ijg2, shm_size * block_height[i] * block_width[i], cudaMemcpyDeviceToHost, stream[i]); } } //wait for stream // private( j, tt2 ) #pragma omp parallel for shared(block_start_x, block_start_y, block_height, block_width, stream, Dist_all, i, thread_id) num_threads(numDevs) schedule(dynamic) for(i = 0; i < str_num; i++){ if( block_height[i] == 0 || block_width[i] == 0) continue; cudaSetDevice(thread_id[i]); cudaStreamSynchronize(stream[i]); putToDist(r, block_start_x[i], block_start_y[i], block_height[i], block_width[i], Dist_all[i *6]); //step 4: free gpu cudaFree(Dist_all[i *6 +3]); cudaFree(Dist_all[i *6 +4]); cudaFree(Dist_all[i *6 +5]); cudaFreeHost(Dist_all[i *6]); cudaFreeHost(Dist_all[i *6 +1]); cudaFreeHost(Dist_all[i *6 +2]); cudaStreamDestroy(stream[i]); } } __global__ void cal_Pone(int* Dist_ij) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ int DS[]; int dsbias = threadIdx.x * blockDim.y + threadIdx.y; int offset_j = gridDim.y * blockDim.y; DS[dsbias] = Dist_ij[i * offset_j + j];//j range = blocksPerG.y __syncthreads(); for (int k = 0; k < blockDim.x ; ++k) {//k range= B if (DS[i * blockDim.x + k] + DS[k * offset_j + j] < DS[dsbias]) DS[dsbias] = DS[i * blockDim.x + k] + DS[k * offset_j + j]; __syncthreads(); } Dist_ij[i * offset_j + j] = DS[dsbias];// save value from shared memory __syncthreads(); } __global__ void cal_Ptwo(int* Dist_ij, int* Dist_ik, int* Dist_kj) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ int DS[]; int dsbias = threadIdx.x * blockDim.y + threadIdx.y; int offset_j = gridDim.y * blockDim.y; DS[dsbias] = Dist_ij[i * offset_j + j];//j range = blocksPerG.y __syncthreads(); for (int k = 0; k < blockDim.x ; ++k) {//k range= B if (Dist_ik[i * blockDim.x + k] + Dist_kj[k * offset_j + j] < DS[dsbias]) DS[dsbias] = Dist_ik[i * blockDim.x + k] + Dist_kj[k * offset_j + j]; } Dist_ij[i * offset_j + j] = DS[dsbias];// save value to shared memory __syncthreads(); } /* 
void cal(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height) { int block_end_x = block_start_x + block_height; int block_end_y = block_start_y + block_width; for (int b_i = block_start_x; b_i < block_end_x; ++b_i) { for (int b_j = block_start_y; b_j < block_end_y; ++b_j) { // To calculate B*B elements in the block (b_i, b_j) // For each block, it need to compute B times for (int k = Round * B; k < (Round +1) * B && k < n; ++k) { // To calculate original index of elements in the block (b_i, b_j) // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2 int block_internal_start_x = b_i * B; int block_internal_end_x = (b_i +1) * B; int block_internal_start_y = b_j * B; int block_internal_end_y = (b_j +1) * B; if (block_internal_end_x > n) block_internal_end_x = n; if (block_internal_end_y > n) block_internal_end_y = n; for (int i = block_internal_start_x; i < block_internal_end_x; ++i) { for (int j = block_internal_start_y; j < block_internal_end_y; ++j) { if (Dist[i * n + k] + Dist[k * n + j] < Dist[i * n + j]) Dist[i * n + j] = Dist[i * n + k] + Dist[k * n + j]; } } } } } } */
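/* Illustrative reference (not part of the file above): a plain O(n^3) Floyd-Warshall over the same global Dist[V][V]/n used by block_FW, useful as a correctness check for the blocked GPU version. It assumes only the globals already declared in this file; the function name is hypothetical. */
void serial_FW_reference() {
    for (int k = 0; k < n; ++k) {
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j < n; ++j) {
                // relax path i -> j through intermediate vertex k
                if (Dist[i][k] + Dist[k][j] < Dist[i][j])
                    Dist[i][j] = Dist[i][k] + Dist[k][j];
            }
        }
    }
}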
18bb3d4d1329db1873b407db953034ae692c66ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <random> #include <vector> #include <boost/timer/timer.hpp> __global__ void kernel(int * a, int * b, int count) { int offset = threadIdx.x + blockDim.x * blockIdx.x; for (int index = offset; index < count; index += blockDim.x * gridDim.x) { b[index] = 2 * a[index]; } } std::vector<int> ver1(const std::vector<int> &vec, const int N) { int *a_d = nullptr; hipMalloc((void**)&a_d, sizeof(int) * N); int *b_d = nullptr; hipMalloc((void**)&b_d, sizeof(int) * N); hipMemcpy(a_d, vec.data(), sizeof(int) * N, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel), dim3(1024), dim3(256), 0, 0, a_d, b_d, N); std::vector<int> b(N); hipMemcpy(b.data(), b_d, sizeof(int) * N, hipMemcpyDeviceToHost); hipFree(a_d); hipFree(b_d); return b; } std::vector<int> ver2(const std::vector<int> &vec, const int N, const int n) { int *a_h = nullptr; hipHostMalloc((void**)&a_h, sizeof(int) * N); int *b_h = nullptr; hipHostMalloc((void**)&b_h, sizeof(int) * N); int *a_d = nullptr; hipMalloc((void**)&a_d, sizeof(int) * N); int *b_d = nullptr; hipMalloc((void**)&b_d, sizeof(int) * N); memcpy(a_h, vec.data(), sizeof(int) * N); hipStream_t str[3]; for (int i = 0; i < 3; ++i) { hipStreamCreate(str + i); } for (int i = 0; i < 3; ++i) { hipMemcpyAsync(a_d + n*i, a_h + n*i, sizeof(int) * n, hipMemcpyHostToDevice, str[i]); } for (int i = 0; i < 3; ++i) { hipLaunchKernelGGL(( kernel), dim3(1024), dim3(256), 0, str[i], a_d + n*i, b_d + n*i, n); } for (int i = 0; i < 3; ++i) { hipMemcpyAsync(b_h + n*i, b_d + n*i, sizeof(int) * n, hipMemcpyDeviceToHost, str[i]); } for (int i = 0; i < 3; ++i) { hipStreamSynchronize(str[i]); hipStreamDestroy(str[i]); } std::vector<int> b(N); memcpy(b.data(), b_h, sizeof(int) * N); hipHostFree(a_h); hipHostFree(b_h); hipFree(a_d); hipFree(b_d); return b; } int main(int argc, char **argv) { if (argc < 3) { std::cerr << "Usage: " << argv[0] << " N K" << std::endl; return -1; } int n = atoi(argv[1]); int k = atoi(argv[2]); int N = 3 * n; std::vector<int> vec(N); std::random_device rd; std::mt19937 mt(rd()); for (int i = 0; i < N; ++i) { vec[i] = mt(); } boost::timer::cpu_timer timer; std::vector<int> b; switch (k) { case 0: b = ver1(vec, N); break; case 1: b = ver2(vec, N, n); break; } std::cout << timer.format() << std::endl; int diff = 0; for (int i = 0; i < N; ++i) { diff += abs(vec[i] * 2 - b[i]); } std::cerr << "diff: " << diff << std::endl; return 0; }
18bb3d4d1329db1873b407db953034ae692c66ba.cu
#include <iostream> #include <random> #include <vector> #include <boost/timer/timer.hpp> __global__ void kernel(int * a, int * b, int count) { int offset = threadIdx.x + blockDim.x * blockIdx.x; for (int index = offset; index < count; index += blockDim.x * gridDim.x) { b[index] = 2 * a[index]; } } std::vector<int> ver1(const std::vector<int> &vec, const int N) { int *a_d = nullptr; cudaMalloc((void**)&a_d, sizeof(int) * N); int *b_d = nullptr; cudaMalloc((void**)&b_d, sizeof(int) * N); cudaMemcpy(a_d, vec.data(), sizeof(int) * N, cudaMemcpyHostToDevice); kernel<<<1024, 256>>>(a_d, b_d, N); std::vector<int> b(N); cudaMemcpy(b.data(), b_d, sizeof(int) * N, cudaMemcpyDeviceToHost); cudaFree(a_d); cudaFree(b_d); return b; } std::vector<int> ver2(const std::vector<int> &vec, const int N, const int n) { int *a_h = nullptr; cudaMallocHost((void**)&a_h, sizeof(int) * N); int *b_h = nullptr; cudaMallocHost((void**)&b_h, sizeof(int) * N); int *a_d = nullptr; cudaMalloc((void**)&a_d, sizeof(int) * N); int *b_d = nullptr; cudaMalloc((void**)&b_d, sizeof(int) * N); memcpy(a_h, vec.data(), sizeof(int) * N); cudaStream_t str[3]; for (int i = 0; i < 3; ++i) { cudaStreamCreate(str + i); } for (int i = 0; i < 3; ++i) { cudaMemcpyAsync(a_d + n*i, a_h + n*i, sizeof(int) * n, cudaMemcpyHostToDevice, str[i]); } for (int i = 0; i < 3; ++i) { kernel<<<1024, 256, 0, str[i]>>>(a_d + n*i, b_d + n*i, n); } for (int i = 0; i < 3; ++i) { cudaMemcpyAsync(b_h + n*i, b_d + n*i, sizeof(int) * n, cudaMemcpyDeviceToHost, str[i]); } for (int i = 0; i < 3; ++i) { cudaStreamSynchronize(str[i]); cudaStreamDestroy(str[i]); } std::vector<int> b(N); memcpy(b.data(), b_h, sizeof(int) * N); cudaFreeHost(a_h); cudaFreeHost(b_h); cudaFree(a_d); cudaFree(b_d); return b; } int main(int argc, char **argv) { if (argc < 3) { std::cerr << "Usage: " << argv[0] << " N K" << std::endl; return -1; } int n = atoi(argv[1]); int k = atoi(argv[2]); int N = 3 * n; std::vector<int> vec(N); std::random_device rd; std::mt19937 mt(rd()); for (int i = 0; i < N; ++i) { vec[i] = mt(); } boost::timer::cpu_timer timer; std::vector<int> b; switch (k) { case 0: b = ver1(vec, N); break; case 1: b = ver2(vec, N, n); break; } std::cout << timer.format() << std::endl; int diff = 0; for (int i = 0; i < N; ++i) { diff += abs(vec[i] * 2 - b[i]); } std::cerr << "diff: " << diff << std::endl; return 0; }
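/* Illustrative sketch (not part of the file above): the same copy/compute/copy overlap as ver2, generalized to an arbitrary number of chunks. The helper name and the num_chunks parameter are hypothetical; it assumes the kernel() defined above, that N is divisible by num_chunks, and that the host buffers were allocated with cudaMallocHost (pinned memory is what lets cudaMemcpyAsync actually overlap with kernel execution). The caller releases the pinned buffers with cudaFreeHost, matching their allocation. */
#include <vector>
#include <cuda_runtime.h>
void doubled_chunked_sketch(const int *h_in_pinned, int *h_out_pinned, int N, int num_chunks)
{
    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc((void**)&d_in, sizeof(int) * N);
    cudaMalloc((void**)&d_out, sizeof(int) * N);
    std::vector<cudaStream_t> streams(num_chunks);
    for (int i = 0; i < num_chunks; ++i) { cudaStreamCreate(&streams[i]); }
    const int chunk = N / num_chunks;
    for (int i = 0; i < num_chunks; ++i) {
        const int off = i * chunk;
        // enqueue the H2D copy, the kernel and the D2H copy of one chunk on its own stream
        cudaMemcpyAsync(d_in + off, h_in_pinned + off, sizeof(int) * chunk, cudaMemcpyHostToDevice, streams[i]);
        kernel<<<1024, 256, 0, streams[i]>>>(d_in + off, d_out + off, chunk);
        cudaMemcpyAsync(h_out_pinned + off, d_out + off, sizeof(int) * chunk, cudaMemcpyDeviceToHost, streams[i]);
    }
    for (int i = 0; i < num_chunks; ++i) { cudaStreamSynchronize(streams[i]); cudaStreamDestroy(streams[i]); }
    cudaFree(d_in);
    cudaFree(d_out);
}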
ea3896f43705e99be9c8e1251c86845082b02baa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <subsampling_helpers.h> #include "cuda_utils.h" #include "print_utils.h" #include "gen_random.h" GLOBAL void kerInitSampleMatrix( int *row, int *col, real *val, real *labels, real *srcLabels, int count, int offset, int maxRows ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { row[ idx ] = idx; val[ idx ] = 1.; //reshuffle the labels here. labels[ idx ] = srcLabels[ col[ idx ] ] ; } } GLOBAL void kerInitSampleMatrixNoLabels( int *row, int *col, real *val, int count, int offset, int maxRows ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { row[ idx ] = idx; val[ idx ] = 1.; } } void initSubSampledHessian( int offset, int rows, SparseDataset *sampledSet, real *sampledLabels, real *srcLabels, int sampledSize ){ int blocks = (sampledSize / BLOCK_SIZE) + (((sampledSize % BLOCK_SIZE) == 0) ? 0 : 1) ; if (sampledLabels == NULL && srcLabels == NULL){ hipLaunchKernelGGL(( kerInitSampleMatrixNoLabels) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, sampledSet->rowPtr, sampledSet->colPtr, sampledSet->valPtr, sampledSize, offset, rows ); } else { hipLaunchKernelGGL(( kerInitSampleMatrix) , dim3(blocks), dim3(BLOCK_SIZE) , 0, 0, sampledSet->rowPtr, sampledSet->colPtr, sampledSet->valPtr, sampledLabels, srcLabels, sampledSize, offset, rows ); } hipDeviceSynchronize (); cudaCheckError (); } void prepareForNonUniformSampling (SparseDataset *samplingMat, int sampleSize, int *indices) { copy_host_device( indices, samplingMat->colPtr, sizeof(int) * sampleSize, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); initSubSampledHessian( -1, -1, samplingMat, NULL, NULL, sampleSize); } void prepareForSampling (SparseDataset *sampledGradient, real *sampledLabels, real *srcLabels, int rows, int sampleSize, int *hostPtr) { int startRow = -1; //generate random rows here for sampling. 
//genRandomVector( hostPtr, sampleSize, rows ); genRandomVector( hostPtr, sampleSize, rows - 1 ); copy_host_device( hostPtr, sampledGradient->colPtr, sizeof(int) * sampleSize, hipMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); startRow = rand () % rows; initSubSampledHessian( startRow, rows, sampledGradient, sampledLabels, srcLabels, sampleSize); } void sampleDataset ( SparseDataset *spSampledGradient, real *dataset, int rows, int cols, int num_classes, real *subSampledGradient, int sampleSize ) { real alpha = 1.0; real beta = 0; cusparseCheckError ( hipsparseDcsrmm( cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, sampleSize, cols, rows, spSampledGradient->nnz, &alpha, spSampledGradient->descr, spSampledGradient->sortedVals, spSampledGradient->rowCsrPtr, spSampledGradient->colPtr, dataset, rows, &beta, subSampledGradient, sampleSize) ); } void sampleSparseDataset ( SparseDataset *spSampler, SparseDataset *spDataset, int rows, int cols, int num_classes, SparseDataset *spGradientSample, int sampleSize ) { int *nnzHostPtr = &spGradientSample->nnz; int baseC = 0; cusparseCheckError( hipsparseSetPointerMode( cusparseHandle, HIPSPARSE_POINTER_MODE_HOST) ); cusparseCheckError ( hipsparseXcsrgemmNnz( cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, //sampleSize, cols, sampleSize, sampleSize, cols, rows, spSampler->descr, spSampler->nnz, spSampler->rowCsrPtr, spSampler->colPtr, spDataset->descr, spDataset->nnz, spDataset->rowCsrPtr, spDataset->colPtr, spGradientSample->descr, spGradientSample->rowCsrPtr, nnzHostPtr ) ); if (nnzHostPtr != NULL){ spGradientSample->nnz = *nnzHostPtr; } else { hipMemcpy( &spGradientSample->nnz, spGradientSample->rowCsrPtr + sampleSize, sizeof(int), hipMemcpyDeviceToHost ); hipMemcpy( &baseC, spGradientSample->rowCsrPtr, sizeof(int), hipMemcpyDeviceToHost ); spGradientSample->nnz -= baseC; } cusparseCheckError ( hipsparseDcsrgemm( cusparseHandle, HIPSPARSE_OPERATION_NON_TRANSPOSE, HIPSPARSE_OPERATION_NON_TRANSPOSE, //sampleSize, cols, sampleSize, sampleSize, cols, rows, spSampler->descr, spSampler->nnz, spSampler->sortedVals, spSampler->rowCsrPtr, spSampler->colPtr, spDataset->descr, spDataset->nnz, spDataset->sortedVals, spDataset->rowCsrPtr, spDataset->colPtr, spGradientSample->descr, spGradientSample->sortedVals, spGradientSample->rowCsrPtr, spGradientSample->colPtr ) ); }
ea3896f43705e99be9c8e1251c86845082b02baa.cu
#include <subsampling_helpers.h> #include "cuda_utils.h" #include "print_utils.h" #include "gen_random.h" GLOBAL void kerInitSampleMatrix( int *row, int *col, real *val, real *labels, real *srcLabels, int count, int offset, int maxRows ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { row[ idx ] = idx; val[ idx ] = 1.; //reshuffle the labels here. labels[ idx ] = srcLabels[ col[ idx ] ] ; } } GLOBAL void kerInitSampleMatrixNoLabels( int *row, int *col, real *val, int count, int offset, int maxRows ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < count) { row[ idx ] = idx; val[ idx ] = 1.; } } void initSubSampledHessian( int offset, int rows, SparseDataset *sampledSet, real *sampledLabels, real *srcLabels, int sampledSize ){ int blocks = (sampledSize / BLOCK_SIZE) + (((sampledSize % BLOCK_SIZE) == 0) ? 0 : 1) ; if (sampledLabels == NULL && srcLabels == NULL){ kerInitSampleMatrixNoLabels <<< blocks, BLOCK_SIZE >>> (sampledSet->rowPtr, sampledSet->colPtr, sampledSet->valPtr, sampledSize, offset, rows ); } else { kerInitSampleMatrix <<< blocks, BLOCK_SIZE >>> (sampledSet->rowPtr, sampledSet->colPtr, sampledSet->valPtr, sampledLabels, srcLabels, sampledSize, offset, rows ); } cudaThreadSynchronize (); cudaCheckError (); } void prepareForNonUniformSampling (SparseDataset *samplingMat, int sampleSize, int *indices) { copy_host_device( indices, samplingMat->colPtr, sizeof(int) * sampleSize, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); initSubSampledHessian( -1, -1, samplingMat, NULL, NULL, sampleSize); } void prepareForSampling (SparseDataset *sampledGradient, real *sampledLabels, real *srcLabels, int rows, int sampleSize, int *hostPtr) { int startRow = -1; //generate random rows here for sampling. //genRandomVector( hostPtr, sampleSize, rows ); genRandomVector( hostPtr, sampleSize, rows - 1 ); copy_host_device( hostPtr, sampledGradient->colPtr, sizeof(int) * sampleSize, cudaMemcpyHostToDevice, ERROR_MEMCPY_HOST_DEVICE ); startRow = rand () % rows; initSubSampledHessian( startRow, rows, sampledGradient, sampledLabels, srcLabels, sampleSize); } void sampleDataset ( SparseDataset *spSampledGradient, real *dataset, int rows, int cols, int num_classes, real *subSampledGradient, int sampleSize ) { real alpha = 1.0; real beta = 0; cusparseCheckError ( cusparseDcsrmm( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, sampleSize, cols, rows, spSampledGradient->nnz, &alpha, spSampledGradient->descr, spSampledGradient->sortedVals, spSampledGradient->rowCsrPtr, spSampledGradient->colPtr, dataset, rows, &beta, subSampledGradient, sampleSize) ); } void sampleSparseDataset ( SparseDataset *spSampler, SparseDataset *spDataset, int rows, int cols, int num_classes, SparseDataset *spGradientSample, int sampleSize ) { int *nnzHostPtr = &spGradientSample->nnz; int baseC = 0; cusparseCheckError( cusparseSetPointerMode( cusparseHandle, CUSPARSE_POINTER_MODE_HOST) ); cusparseCheckError ( cusparseXcsrgemmNnz( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, //sampleSize, cols, sampleSize, sampleSize, cols, rows, spSampler->descr, spSampler->nnz, spSampler->rowCsrPtr, spSampler->colPtr, spDataset->descr, spDataset->nnz, spDataset->rowCsrPtr, spDataset->colPtr, spGradientSample->descr, spGradientSample->rowCsrPtr, nnzHostPtr ) ); if (nnzHostPtr != NULL){ spGradientSample->nnz = *nnzHostPtr; } else { cudaMemcpy( &spGradientSample->nnz, spGradientSample->rowCsrPtr + sampleSize, sizeof(int), cudaMemcpyDeviceToHost ); cudaMemcpy( &baseC, 
spGradientSample->rowCsrPtr, sizeof(int), cudaMemcpyDeviceToHost ); spGradientSample->nnz -= baseC; } cusparseCheckError ( cusparseDcsrgemm( cusparseHandle, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, //sampleSize, cols, sampleSize, sampleSize, cols, rows, spSampler->descr, spSampler->nnz, spSampler->sortedVals, spSampler->rowCsrPtr, spSampler->colPtr, spDataset->descr, spDataset->nnz, spDataset->sortedVals, spDataset->rowCsrPtr, spDataset->colPtr, spGradientSample->descr, spGradientSample->sortedVals, spGradientSample->rowCsrPtr, spGradientSample->colPtr ) ); }
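/* Illustrative host-side sketch (not part of the file above): what the sparse "sampler" matrix built by kerInitSampleMatrix amounts to. Row i of the sampler holds a single 1.0 in column col[i], so multiplying it with the dataset (as sampleDataset does via cusparseDcsrmm) simply gathers the selected source rows. The function below shows that gather on a row-major double matrix for clarity; the names and the row-major layout are assumptions, the real code operates on column-major data of the project's `real` type. */
void gather_sampled_rows_reference( const double *dataset, int cols,
                                    const int *sampledRows, int sampleSize, double *out )
{
   for (int i = 0; i < sampleSize; i ++) {
      for (int j = 0; j < cols; j ++) {
         out[ i * cols + j ] = dataset[ sampledRows[i] * cols + j ]; // copy row sampledRows[i] into output row i
      }
   }
}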
d5d5dd6c75797aa568d115351c2777b2b95573db.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../header/main.h" //Matrix fuellen void initMatrix(float *ip, int size){ //random seed erstellen time_t t; srand((unsigned)time(&t)); //Matrix auffuellen for (int i = 0; i < size; ++i){ ip[i] = (float)(rand() & 0xFF) / 100.0f; } } //Matrix gegen einander testen void checkMatrix(double *Pserial, double *Pkernel, int N, char string[40]){ double epsilon = 1.0e-8; //Fehlertoleranz int match = 1; for (int i = 0; i < N; ++i){ if (abs(Pserial[i] - Pkernel[i]) > epsilon){ match = 0; printf("Arrays do not match between %s!\n", string); printf("host:%5.10f gpu:%5.10f at Element %d\n", Pserial[i], Pkernel[i], i); break; } } if (match) printf("Arrays match between %s.\n\n", string); } int main(int argc, char **argv){ int h_width;//Breite der Matrix int h_arraySize;// groesse der Matrix = width * width int memSize = 0; int memSizeErg = 0; char cBetween[20]; //variable for time calc double tStart = 0; double tEnd = 0; float *h_M;//Matrix 1 -> host = cpu float *h_N;//Matrix 2 -> host = cpu float *d_M;//Matrix 1 -> device = gpu <- im Devices-Speicher! float *d_N;//Matrix 2 -> device = gpu <- im Devices-Speicher! double *h_Ps;//Ergebnis serial double *h_Pk;//Ergebnis kernel without shared mem double *h_PkSmem;//Ergebnis kernel with shared mem double *d_Pk;//Ergebnis kernel <- im Devices-Speicher! dim3 dimGrid, dimBlock, dimGridSMEM, dimBlockSMEM; if(argc == 4){ h_width = atoi(argv[1]); dimBlock.x = atoi(argv[2]); dimBlock.y = atoi(argv[3]); dimBlock.z = 1; //Aus Block Grid bestimmen. dimGrid.x = ceil((float) h_width / dimBlock.x); dimGrid.y = ceil((float) h_width / dimBlock.y); dimGrid.z = 1; dimBlockSMEM.x = TILE_WIDTH; dimBlockSMEM.y = TILE_WIDTH; dimBlockSMEM.z = 1; dimGridSMEM.x = ceil((float)h_width / TILE_WIDTH); dimGridSMEM.y = ceil((float)h_width / TILE_WIDTH); dimGridSMEM.z = 1; }else{ printf("Falsche Parameter Anzahl!\nwidth blockX blockY!\n\n"); exit(-1); } //Bestimmen der KERNEL Parameter ( GRID Dim & BLOCK Dim ) printf("Normal: Grid: %d|%d|%d and Block: %d|%d|%d for width: %d.\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x, dimBlock.y,dimBlock.z, h_width); printf("SMem: Grid: %d|%d|%d and Block: %d|%d|%d for width: %d.\n", dimGridSMEM.x,dimGridSMEM.y,dimGridSMEM.z, dimBlockSMEM.x, dimBlockSMEM.y,dimBlockSMEM.z, h_width); //mit der width die Array groesse berechen h_arraySize = h_width * h_width; //Speichergroesse bestimmen memSize = sizeof(float)*h_arraySize; memSizeErg = sizeof(double)*h_arraySize; //Host Arrays allokieren h_M = (float*)malloc(memSize); h_N = (float*)malloc(memSize); //Host-Ergebnis Array initialisieren h_Ps = (double*)malloc(memSizeErg); h_Pk = (double*)malloc(memSizeErg); h_PkSmem = (double*)malloc(memSizeErg); //devices array allokieren cudaErr(hipMalloc((void**)&d_M, memSize)); cudaErr(hipMalloc((void**)&d_N, memSize)); //Device-Ergebnis array initialisieren cudaErr(hipMalloc((void**)&d_Pk, memSizeErg)); //Matrix mit zufaelligen Werten fuellen initMatrix(h_M, h_arraySize); initMatrix(h_N, h_arraySize); //host array in Device-Array kopieren cudaErr(hipMemcpy(d_M, h_M, memSize, hipMemcpyHostToDevice)); cudaErr(hipMemcpy(d_N, h_N, memSize, hipMemcpyHostToDevice)); //Matrix zu Testzwecken serielle berchenen lassen & Zeitmessung //Zeitmessung implementieren!!! 
printf("Start CPU MatrixMult\n"); tStart = omp_get_wtime(); serialMatrixMult(h_M, h_N, h_Ps, h_width); tEnd = omp_get_wtime(); printf("Finish CPU MatrixMult in %f ms\n\n",1.e3*(tEnd - tStart)); /* //Matrix auf der GPU berechnen; am besten diverse GRID | BLOCK kompiationen Testen printf("Start GPU MatrixMult aufwaermen\n"); tStart = omp_get_wtime(); cudaMatrixMult<<<dimGrid, dimBlock>>>(d_M, d_N, d_Pk, h_width); cudaErr(hipDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult in %f ms\n\n", 1.e3*(tEnd - tStart)); printf("Start GPU MatrixMult jetzt aber richtig.\n"); tStart = omp_get_wtime(); cudaMatrixMult<<<dimGrid, dimBlock>>>(d_M, d_N, d_Pk, h_width); cudaErr(hipDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult in %f ms\n\n", 1.e3*(tEnd - tStart)); //Ergebnis kopieren cudaErr(hipMemcpy(h_Pk, d_Pk, memSizeErg, hipMemcpyDeviceToHost)); //Matrix testen strcpy(cBetween, "CPU - GPU w\\o smem"); checkMatrix(h_Ps, h_Pk, h_arraySize,cBetween); */ strcpy(cBetween, "CPU - GPU with smem"); printf("Start GPU MatrixMult SharedMem aufwaermen.\n"); tStart = omp_get_wtime(); hipLaunchKernelGGL(( cudaMatrixMultWithSMem), dim3(dimGridSMEM), dim3(dimBlockSMEM), 0, 0, d_M, d_N, d_Pk, h_width); cudaErr(hipDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult SharedMem in %f ms\n\n", 1.e3*(tEnd - tStart)); printf("Start GPU MatrixMult SharedMem jetzt aber richtig..\n"); tStart = omp_get_wtime(); hipLaunchKernelGGL(( cudaMatrixMultWithSMem), dim3(dimGridSMEM), dim3(dimBlockSMEM), 0, 0, d_M, d_N, d_Pk, h_width); cudaErr(hipDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult SharedMem in %f ms\n\n", 1.e3*(tEnd - tStart)); //Ergebnis kopieren cudaErr(hipMemcpy(h_PkSmem, d_Pk, memSizeErg, hipMemcpyDeviceToHost)); //Matrix testen checkMatrix(h_Ps, h_PkSmem, h_arraySize, cBetween); //Alles befreien free(h_M); free(h_N); free(h_Ps); cudaErr(hipFree(d_M)); cudaErr(hipFree(d_N)); cudaErr(hipFree(d_Pk)); //nicht vergessen ;-) hipDeviceReset(); //Programm mit Erfolg beenden return 0; }
d5d5dd6c75797aa568d115351c2777b2b95573db.cu
#include "../header/main.h" //Matrix fuellen void initMatrix(float *ip, int size){ //random seed erstellen time_t t; srand((unsigned)time(&t)); //Matrix auffuellen for (int i = 0; i < size; ++i){ ip[i] = (float)(rand() & 0xFF) / 100.0f; } } //Matrix gegen einander testen void checkMatrix(double *Pserial, double *Pkernel, int N, char string[40]){ double epsilon = 1.0e-8; //Fehlertoleranz int match = 1; for (int i = 0; i < N; ++i){ if (abs(Pserial[i] - Pkernel[i]) > epsilon){ match = 0; printf("Arrays do not match between %s!\n", string); printf("host:%5.10f gpu:%5.10f at Element %d\n", Pserial[i], Pkernel[i], i); break; } } if (match) printf("Arrays match between %s.\n\n", string); } int main(int argc, char **argv){ int h_width;//Breite der Matrix int h_arraySize;// groesse der Matrix = width * width int memSize = 0; int memSizeErg = 0; char cBetween[20]; //variable for time calc double tStart = 0; double tEnd = 0; float *h_M;//Matrix 1 -> host = cpu float *h_N;//Matrix 2 -> host = cpu float *d_M;//Matrix 1 -> device = gpu <- im Devices-Speicher! float *d_N;//Matrix 2 -> device = gpu <- im Devices-Speicher! double *h_Ps;//Ergebnis serial double *h_Pk;//Ergebnis kernel without shared mem double *h_PkSmem;//Ergebnis kernel with shared mem double *d_Pk;//Ergebnis kernel <- im Devices-Speicher! dim3 dimGrid, dimBlock, dimGridSMEM, dimBlockSMEM; if(argc == 4){ h_width = atoi(argv[1]); dimBlock.x = atoi(argv[2]); dimBlock.y = atoi(argv[3]); dimBlock.z = 1; //Aus Block Grid bestimmen. dimGrid.x = ceil((float) h_width / dimBlock.x); dimGrid.y = ceil((float) h_width / dimBlock.y); dimGrid.z = 1; dimBlockSMEM.x = TILE_WIDTH; dimBlockSMEM.y = TILE_WIDTH; dimBlockSMEM.z = 1; dimGridSMEM.x = ceil((float)h_width / TILE_WIDTH); dimGridSMEM.y = ceil((float)h_width / TILE_WIDTH); dimGridSMEM.z = 1; }else{ printf("Falsche Parameter Anzahl!\nwidth blockX blockY!\n\n"); exit(-1); } //Bestimmen der KERNEL Parameter ( GRID Dim & BLOCK Dim ) printf("Normal: Grid: %d|%d|%d and Block: %d|%d|%d for width: %d.\n", dimGrid.x,dimGrid.y,dimGrid.z, dimBlock.x, dimBlock.y,dimBlock.z, h_width); printf("SMem: Grid: %d|%d|%d and Block: %d|%d|%d for width: %d.\n", dimGridSMEM.x,dimGridSMEM.y,dimGridSMEM.z, dimBlockSMEM.x, dimBlockSMEM.y,dimBlockSMEM.z, h_width); //mit der width die Array groesse berechen h_arraySize = h_width * h_width; //Speichergroesse bestimmen memSize = sizeof(float)*h_arraySize; memSizeErg = sizeof(double)*h_arraySize; //Host Arrays allokieren h_M = (float*)malloc(memSize); h_N = (float*)malloc(memSize); //Host-Ergebnis Array initialisieren h_Ps = (double*)malloc(memSizeErg); h_Pk = (double*)malloc(memSizeErg); h_PkSmem = (double*)malloc(memSizeErg); //devices array allokieren cudaErr(cudaMalloc((void**)&d_M, memSize)); cudaErr(cudaMalloc((void**)&d_N, memSize)); //Device-Ergebnis array initialisieren cudaErr(cudaMalloc((void**)&d_Pk, memSizeErg)); //Matrix mit zufaelligen Werten fuellen initMatrix(h_M, h_arraySize); initMatrix(h_N, h_arraySize); //host array in Device-Array kopieren cudaErr(cudaMemcpy(d_M, h_M, memSize, cudaMemcpyHostToDevice)); cudaErr(cudaMemcpy(d_N, h_N, memSize, cudaMemcpyHostToDevice)); //Matrix zu Testzwecken serielle berchenen lassen & Zeitmessung //Zeitmessung implementieren!!! 
printf("Start CPU MatrixMult\n"); tStart = omp_get_wtime(); serialMatrixMult(h_M, h_N, h_Ps, h_width); tEnd = omp_get_wtime(); printf("Finish CPU MatrixMult in %f ms\n\n",1.e3*(tEnd - tStart)); /* //Matrix auf der GPU berechnen; am besten diverse GRID | BLOCK kompiationen Testen printf("Start GPU MatrixMult aufwaermen\n"); tStart = omp_get_wtime(); cudaMatrixMult<<<dimGrid, dimBlock>>>(d_M, d_N, d_Pk, h_width); cudaErr(cudaDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult in %f ms\n\n", 1.e3*(tEnd - tStart)); printf("Start GPU MatrixMult jetzt aber richtig.\n"); tStart = omp_get_wtime(); cudaMatrixMult<<<dimGrid, dimBlock>>>(d_M, d_N, d_Pk, h_width); cudaErr(cudaDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult in %f ms\n\n", 1.e3*(tEnd - tStart)); //Ergebnis kopieren cudaErr(cudaMemcpy(h_Pk, d_Pk, memSizeErg, cudaMemcpyDeviceToHost)); //Matrix testen strcpy(cBetween, "CPU - GPU w\\o smem"); checkMatrix(h_Ps, h_Pk, h_arraySize,cBetween); */ strcpy(cBetween, "CPU - GPU with smem"); printf("Start GPU MatrixMult SharedMem aufwaermen.\n"); tStart = omp_get_wtime(); cudaMatrixMultWithSMem<<<dimGridSMEM, dimBlockSMEM>>>(d_M, d_N, d_Pk, h_width); cudaErr(cudaDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult SharedMem in %f ms\n\n", 1.e3*(tEnd - tStart)); printf("Start GPU MatrixMult SharedMem jetzt aber richtig..\n"); tStart = omp_get_wtime(); cudaMatrixMultWithSMem<<<dimGridSMEM, dimBlockSMEM>>>(d_M, d_N, d_Pk, h_width); cudaErr(cudaDeviceSynchronize()); tEnd = omp_get_wtime(); printf("Finish GPU MatrixMult SharedMem in %f ms\n\n", 1.e3*(tEnd - tStart)); //Ergebnis kopieren cudaErr(cudaMemcpy(h_PkSmem, d_Pk, memSizeErg, cudaMemcpyDeviceToHost)); //Matrix testen checkMatrix(h_Ps, h_PkSmem, h_arraySize, cBetween); //Alles befreien free(h_M); free(h_N); free(h_Ps); cudaErr(cudaFree(d_M)); cudaErr(cudaFree(d_N)); cudaErr(cudaFree(d_Pk)); //nicht vergessen ;-) cudaDeviceReset(); //Programm mit Erfolg beenden return 0; }
751bc705f32cad4682356e38087463d63bae3043.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

template <typename T> __global__ void kernelgpuInitwdg2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	while (i<ng) {
		int j = i%npe;
		int k = (i-j)/npe;
		T xdg1 = xdg[j+npe*0+npe*ncx*k];
		T xdg2 = xdg[j+npe*1+npe*ncx*k];
		f[j+npe*0+npe*nce*k] = 0.0;
		i += blockDim.x * gridDim.x;
	}
}

template <typename T> void gpuInitwdg2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
	int blockDim = 256;
	int gridDim = (ng + blockDim - 1) / blockDim;
	gridDim = (gridDim>1024)? 1024 : gridDim;
	hipLaunchKernelGGL(( kernelgpuInitwdg2), dim3(gridDim), dim3(blockDim), 0, 0, f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne);
}

template void gpuInitwdg2(double *, double *, double *, double *, int, int, int, int, int, int);
template void gpuInitwdg2(float *, float *, float *, float *, int, int, int, int, int, int);
751bc705f32cad4682356e38087463d63bae3043.cu
template <typename T> __global__ void kernelgpuInitwdg2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	while (i<ng) {
		int j = i%npe;
		int k = (i-j)/npe;
		T xdg1 = xdg[j+npe*0+npe*ncx*k];
		T xdg2 = xdg[j+npe*1+npe*ncx*k];
		f[j+npe*0+npe*nce*k] = 0.0;
		i += blockDim.x * gridDim.x;
	}
}

template <typename T> void gpuInitwdg2(T *f, T *xdg, T *uinf, T *param, int modelnumber, int ng, int ncx, int nce, int npe, int ne)
{
	int blockDim = 256;
	int gridDim = (ng + blockDim - 1) / blockDim;
	gridDim = (gridDim>1024)? 1024 : gridDim;
	kernelgpuInitwdg2<<<gridDim, blockDim>>>(f, xdg, uinf, param, modelnumber, ng, ncx, nce, npe, ne);
}

template void gpuInitwdg2(double *, double *, double *, double *, int, int, int, int, int, int);
template void gpuInitwdg2(float *, float *, float *, float *, int, int, int, int, int, int);
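// [Editorial sketch - not part of the generated file above.] The launcher clamps
// gridDim to 1024 blocks and relies on the kernel's "while (i<ng) ... i += blockDim.x
// * gridDim.x" loop so every one of the ng work items is still visited. A standalone
// sketch of that grid-stride pattern (gridStrideFillSketch and launchFillSketch are
// illustrative names, not part of the generated code):
__global__ void gridStrideFillSketch(double *out, double value, int n) {
    // Each thread starts at its global index and hops by the total thread count,
    // so correctness does not depend on launching one thread per element.
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        out[i] = value;
}

void launchFillSketch(double *d_out, double value, int n) {
    int blockDim = 256;
    int gridDim = (n + blockDim - 1) / blockDim;
    gridDim = (gridDim > 1024) ? 1024 : gridDim;   // same clamp as gpuInitwdg2
    gridStrideFillSketch<<<gridDim, blockDim>>>(d_out, value, n);
}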
e8926770f4e951856591f663f08a26068aa31122.hip
// !!! This is a file automatically generated by hipify!!! #include "THHTensorFFT.h" #include "hip/hip_runtime.h" #include <hipfft.h> #include <hipfftXt.h> #ifdef _CUFFT_H_ // cuFFT API errors static const char *_cudaGetErrorEnum(hipfftResult error) { switch (error) { case HIPFFT_SUCCESS: return "HIPFFT_SUCCESS"; case HIPFFT_INVALID_PLAN: return "HIPFFT_INVALID_PLAN"; case HIPFFT_ALLOC_FAILED: return "HIPFFT_ALLOC_FAILED"; case HIPFFT_INVALID_TYPE: return "HIPFFT_INVALID_TYPE"; case HIPFFT_INVALID_VALUE: return "HIPFFT_INVALID_VALUE"; case HIPFFT_INTERNAL_ERROR: return "HIPFFT_INTERNAL_ERROR"; case HIPFFT_EXEC_FAILED: return "HIPFFT_EXEC_FAILED"; case HIPFFT_SETUP_FAILED: return "HIPFFT_SETUP_FAILED"; case HIPFFT_INVALID_SIZE: return "HIPFFT_INVALID_SIZE"; case HIPFFT_UNALIGNED_DATA: return "HIPFFT_UNALIGNED_DATA"; case HIPFFT_INCOMPLETE_PARAMETER_LIST: return "HIPFFT_INCOMPLETE_PARAMETER_LIST"; case HIPFFT_INVALID_DEVICE: return "HIPFFT_INVALID_DEVICE"; case HIPFFT_PARSE_ERROR: return "HIPFFT_PARSE_ERROR"; case HIPFFT_NO_WORKSPACE: return "HIPFFT_NO_WORKSPACE"; case HIPFFT_NOT_IMPLEMENTED: return "HIPFFT_NOT_IMPLEMENTED"; case HIPFFT_LICENSE_ERROR: return "HIPFFT_LICENSE_ERROR"; case HIPFFT_NOT_SUPPORTED: return "HIPFFT_NOT_SUPPORTED"; } return "<unknown>"; } #endif inline void __cufftSafeCall(hipfftResult err, const char *file, const int line) { if (HIPFFT_SUCCESS != err) { fprintf(stderr,"CUFFT error in file '%s', line %d\n %d\nerror: %s\nterminating!\n", file, line, err, _cudaGetErrorEnum(err)); hipDeviceReset(); } } #include "generic/THCTensorFFT.cu" #include "THHGenerateComplexTypes.h"
e8926770f4e951856591f663f08a26068aa31122.cu
#include "THCTensorFFT.h" #include "cuda_runtime.h" #include <cufft.h> #include <cufftXt.h> #ifdef _CUFFT_H_ // cuFFT API errors static const char *_cudaGetErrorEnum(cufftResult error) { switch (error) { case CUFFT_SUCCESS: return "CUFFT_SUCCESS"; case CUFFT_INVALID_PLAN: return "CUFFT_INVALID_PLAN"; case CUFFT_ALLOC_FAILED: return "CUFFT_ALLOC_FAILED"; case CUFFT_INVALID_TYPE: return "CUFFT_INVALID_TYPE"; case CUFFT_INVALID_VALUE: return "CUFFT_INVALID_VALUE"; case CUFFT_INTERNAL_ERROR: return "CUFFT_INTERNAL_ERROR"; case CUFFT_EXEC_FAILED: return "CUFFT_EXEC_FAILED"; case CUFFT_SETUP_FAILED: return "CUFFT_SETUP_FAILED"; case CUFFT_INVALID_SIZE: return "CUFFT_INVALID_SIZE"; case CUFFT_UNALIGNED_DATA: return "CUFFT_UNALIGNED_DATA"; case CUFFT_INCOMPLETE_PARAMETER_LIST: return "CUFFT_INCOMPLETE_PARAMETER_LIST"; case CUFFT_INVALID_DEVICE: return "CUFFT_INVALID_DEVICE"; case CUFFT_PARSE_ERROR: return "CUFFT_PARSE_ERROR"; case CUFFT_NO_WORKSPACE: return "CUFFT_NO_WORKSPACE"; case CUFFT_NOT_IMPLEMENTED: return "CUFFT_NOT_IMPLEMENTED"; case CUFFT_LICENSE_ERROR: return "CUFFT_LICENSE_ERROR"; case CUFFT_NOT_SUPPORTED: return "CUFFT_NOT_SUPPORTED"; } return "<unknown>"; } #endif inline void __cufftSafeCall(cufftResult err, const char *file, const int line) { if (CUFFT_SUCCESS != err) { fprintf(stderr,"CUFFT error in file '%s', line %d\n %d\nerror: %s\nterminating!\n", file, line, err, _cudaGetErrorEnum(err)); cudaDeviceReset(); } } #include "generic/THCTensorFFT.cu" #include "THCGenerateComplexTypes.h"
e4bfe85609a553c6d441b325c53f6b8317207013.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <hip/hip_runtime.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[449]; __shared__ int gap_h[449]; //insertion __shared__ short2 gap_size_h[449]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M+open; //M which is cacluated by last step in the same thread gap_v+=extend; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
match:mismatch); prev_gap=MM[threadIdx.x]+open; step_right=gap_h[threadIdx.x]+extend; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*current_reference_id+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. } } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; 
result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; int total_size=0; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. 
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; hipMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); total_size+=read_len*ref_len; int new_len=(ref_len+4-1)/4; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; hipMemcpy(data_d_total,data_h_total,data_size_to_copy,hipMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; hipMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; hipMalloc( (int **) & direction, size * (640*1100* sizeof (int))); dim3 block(448); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); hipLaunchKernelGGL(( calculate_cigar), dim3(grid),dim3(block), 0, 0, size,data_d,num_add_d,result_d,direction); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result hipMemcpy(result_h,result_d,size*sizeof(int)*4,hipMemcpyDeviceToHost); hipMemcpy(cigar_h,cigar,128*sizeof(char)*size, hipMemcpyDeviceToHost); hipMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,hipMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ hipFree(direction); free(data_h_total); hipFree(data_d_total); free(inputdata); hipFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
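// [Editorial sketch - not part of the file above.] Both kernels assign one alignment
// problem per thread block and let blocks stride over the batch (offset += gridDim.x),
// so the grid size bounds concurrency rather than the amount of work. A stripped-down
// sketch of that block-stride pattern with a hypothetical kernel (perProblemSketch and
// its counting workload are illustrative, not the aligner itself):
__global__ void perProblemSketch(const int *problemSizes, int *results, int numProblems) {
    __shared__ int acc;
    for (int p = blockIdx.x; p < numProblems; p += gridDim.x) {
        if (threadIdx.x == 0) acc = 0;
        __syncthreads();
        // All threads of the block cooperate on problem p.
        if (threadIdx.x < problemSizes[p]) atomicAdd(&acc, 1);
        __syncthreads();
        if (threadIdx.x == 0) results[p] = acc;
    }
}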
e4bfe85609a553c6d441b325c53f6b8317207013.cu
#include <iostream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <time.h> #include <cuda.h> #include <stdint.h> #include <math.h> #include <unistd.h> #include <omp.h> #include<limits> double diff(timespec start, timespec end) { double a=0; if((end.tv_nsec-start.tv_nsec)<0) { a=end.tv_sec-start.tv_sec-1; a+=(1000000000+end.tv_nsec-start.tv_nsec)/1000000000.0; } else { a=end.tv_sec-start.tv_sec+(end.tv_nsec-start.tv_nsec)/1000000000.0; } return a; } struct NUM_ADD { short2 read_reference_number; int address_array; }; __global__ void calculate_cigar( int size, char * data, NUM_ADD *num_add,int4 * result, int * direction) //, char * result { int offset=blockIdx.x; __shared__ short2 read_reference_number; __shared__ char * read_base_array; __shared__ char4 * reference_base_array; __shared__ int mismatch; __shared__ int match; __shared__ int open; __shared__ int extend; __shared__ short2 * direction_index; while(offset<size) { if( threadIdx.x==0) { read_reference_number=num_add[offset].read_reference_number; read_base_array=(char *) (data+num_add[offset].address_array); reference_base_array=(char4 *) (read_base_array+(read_reference_number.x+127)/128*128); direction_index=(short2 *) (direction+offset*640*1100); } __syncthreads(); __shared__ char reference_base_in_char[600]; int hh=(read_reference_number.y+4-1)/4; int tt=(hh+blockDim.x-1)/blockDim.x; for(int ii=0;ii<tt;ii++) { int aa=threadIdx.x+ii*blockDim.x; if(aa< hh) { char4 reference_base_in_thread; reference_base_in_thread=reference_base_array[aa]; //Is it right to get data from global memory reference_base_in_char[aa*4]=reference_base_in_thread.x; reference_base_in_char[aa*4+1]=reference_base_in_thread.y; reference_base_in_char[aa*4+2]=reference_base_in_thread.z; reference_base_in_char[aa*4+3]=reference_base_in_thread.w; } } __shared__ int MM[449]; __shared__ int gap_h[449]; //insertion __shared__ short2 gap_size_h[449]; //insertion __shared__ int result_col; __shared__ int result_row; __shared__ int result_col_index; __shared__ int result_row_index; //__shared__ char cigar_m[128]; //__shared__ int cigar_int_m[128]; //int final_result; //int final_i; //int final_j; if(threadIdx.x==0) { MM[0]=0; gap_h[0]=-1000000000;//std::numeric_limits<int>::min()/2; gap_size_h[0].x=0; gap_size_h[0].y=0; match=200; mismatch=-150; open=-260; extend=-11; result_col=-1000000000;//std::numeric_limits<int>::min()/2; result_row=-1000000000;//std::numeric_limits<int>::min()/2; // for(int i=0;i<read_reference_number.y;i++) // printf("%c",reference_base_in_char[i]); // printf("\n"); // for(int i=0;i<read_reference_number.x;i++) // printf("%c",read_base_array[i]); } __syncthreads(); int read_number=read_reference_number.x; { char read_base; read_base=read_base_array[threadIdx.x]; int gap_v=-1000000000;//std::numeric_limits<int>::min()/2;; int gap_size_v=0; //Deletion int M=0; //now int step_right; //now int ki=0;//insertion h negetive //deletion v int MMM=0; short mt=0; short2 curmt; curmt.x=0; curmt.y=0; int current_reference_id=0; for(int j=0;j<read_reference_number.x+read_reference_number.y-1;j++) { int aa=j-threadIdx.x; if( aa>=0 && (current_reference_id<read_reference_number.y)) { int prev_gap=M+open; //M which is cacluated by last step in the same thread gap_v+=extend; if(prev_gap>gap_v) { gap_v=prev_gap; gap_size_v=1; } else gap_size_v++; char reference_base_each=reference_base_in_char[current_reference_id]; M=MMM+(read_base==reference_base_each? 
match:mismatch); prev_gap=MM[threadIdx.x]+open; step_right=gap_h[threadIdx.x]+extend; if(prev_gap>step_right) { step_right=prev_gap; ki=1; } else ki=gap_size_h[threadIdx.x].x+1; bool diag=(M>=gap_v)&&(M>=step_right); curmt.y=0; if(diag) { curmt.x=0; //if(threadIdx.x==0||current_reference_id==0) // curmt.y=0; // else curmt.y=mt+1; // curBtrack=0; } else if(step_right>=gap_v) { M=step_right; curmt.x=0-ki; // curBtrack=0-ki; } else { M=gap_v; curmt.x=gap_size_v; //curBtrack=gap_size_v; } MMM=MM[threadIdx.x]; mt=gap_size_h[threadIdx.x].y; direction_index[640*current_reference_id+threadIdx.x]=curmt; //if(threadIdx.x==read_reference_number.x-3) //printf("%p %d ", &direction_index[800*j+threadIdx.x],curBtrack); if(current_reference_id==read_reference_number.y-1) { if(M>=result_row) { result_row=M; result_row_index=threadIdx.x; // } //printf("%d %d %d %d %d \n",read_reference_number.y,M,result_row,result_row_index,threadIdx.x); } if(threadIdx.x==read_reference_number.x-1) { if(M>=result_col) { result_col=M; result_col_index=current_reference_id; // +1 } } current_reference_id++; } __syncthreads(); //to make sure that the former value of MM[threadIdx.x+1] are used by other threads. MM[threadIdx.x+1]=M; gap_h[threadIdx.x+1]=step_right; gap_size_h[threadIdx.x+1].x=ki; gap_size_h[threadIdx.x+1].y=curmt.y; __syncthreads(); // there should be two synthreads(); // to make sure that all of MM[threadIdx.x+1] have get a new value before M,D and I changed. } } // char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion // __shared__ int cigar_index; // int segment_length; // short2 btr; // char new_state; // int step_length; int4 result4; if(threadIdx.x==read_reference_number.x-1) { //printf("%d %d %d %d\n", result_row,result_col, result_row_index,result_col_index); if(result_row>result_col||result_row==result_col&&(read_reference_number.x-result_row_index-1)>(read_reference_number.y-result_col_index-1)) { // final_result=result_row; result4.x=read_reference_number.y-1; result4.y=result_row_index; result4.z=read_reference_number.x-1-result_row_index; } else { // final_result=result_col; result4.x=result_col_index; result4.y=read_reference_number.x-1; result4.z=0; } //result[offset*3]=final_result; //printf("%d\n",final_result); //result4.x=fina_i; //result4.y=fina_j; //result4.z=segment_length; result[offset]=result4; } __syncthreads(); offset+=gridDim.x; } } __global__ void calculate_cigar_2( int size, int4 * result, char * cigar,int * cigar_int,int * direction) //, char * result { int offset=blockIdx.x; int4 result4;; short2 * direction_index; __shared__ char * cigar_store; __shared__ int *cigar_int_store; __shared__ char cigar_m[128]; __shared__ int cigar_int_m[128]; while(offset<size) { char state;//0 match; 1 mistmatch; 2 inseriton; 3 deletion __shared__ int cigar_index; int segment_length; short2 btr; char new_state; int step_length; if( threadIdx.x==0) { result4=result[offset]; direction_index=(short2 *) (direction+offset*640*1100); cigar_store=(char *) (cigar+offset*sizeof(char)*128); cigar_int_store=(int *) (cigar_int+offset*128); //printf("\n %d %d\n", final_i,final_j); cigar_index=0; if(result4.z>0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.z; cigar_index++; } segment_length=0; state='N'; do { btr=direction_index[(result4.x+result4.y)*640+result4.y]; if(btr.x>0) { new_state='D'; step_length=btr.x; result4.x-=step_length; } else if(btr.x<0) { new_state='I'; step_length=0-btr.x; result4.y-=step_length; } else { new_state='M'; step_length=btr.y; result4.x-=step_length; 
result4.y-=step_length; } if(state=='N') state=new_state; if(state==new_state) { segment_length+=step_length; } else { cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; segment_length=step_length; cigar_index++; state=new_state; } }while(result4.x>=0&&result4.y>=0); cigar_m[cigar_index]=state; cigar_int_m[cigar_index]=segment_length; cigar_index++; if(result4.y>=0) { cigar_m[cigar_index]='S'; cigar_int_m[cigar_index]=result4.y+1; cigar_index++; } result4.z=result4.x+1; result4.w=cigar_index; result[offset]=result4; /* for(int i=cigar_index-1;i>=0;i--) { printf("%d%c",cigar_int_m[i],cigar_m[i]); } */ } __syncthreads(); if(threadIdx.x<cigar_index && cigar_index<=blockDim.x) { // if(threadIdx.x==0) // printf("%c %d\n",cigar_m[cigar_index-1-threadIdx.x], cigar_int_m[cigar_index-1-threadIdx.x]); cigar_store[threadIdx.x]=cigar_m[cigar_index-1-threadIdx.x]; cigar_int_store[threadIdx.x]=cigar_int_m[cigar_index-1-threadIdx.x]; // if(threadIdx.x==0) // printf("%c %d\n", cigar_store[threadIdx.x],cigar_int_store[threadIdx.x]); } offset+=gridDim.x; } } struct InputData { char read_base[600]; char reference_base[600]; }; int main(int artc, char* args[]) { FILE * file; file=fopen(args[1],"r"); int size; double computation_time=0;//total_time=0; timespec start,finish; int total_size=0; /* char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[1]); strcpy(inputdata[index].read_base,data[1]); index++; } */ /* fscanf(file,"%d",&size); while(!feof(file)) { InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<size;i++) { fscanf(file,"%s ",inputdata[i].reference_base); fscanf(file,"%s ",inputdata[i].read_base); } */ char data[200][1000]; for(int i=0;i<101;i++) { fscanf(file,"%s ", data[i]); } int row=atoi(args[2]); int col=atoi(args[3]); size=row*col; for(int ww=0;ww<1;ww++) { int index=0; InputData * inputdata=(InputData* )malloc(size*(sizeof(InputData))); for(int i=0;i<row;i++) for(int j=0;j<col;j++) { strcpy(inputdata[index].reference_base,data[i]); strcpy(inputdata[index].read_base,data[j]); index++; } //data preparation. 
char * data_h_total=(char*)malloc(size * 640* sizeof (char)*2+(size*sizeof(NUM_ADD)+127)/128*128); NUM_ADD * data_num_add=(NUM_ADD *) (data_h_total); char * data_h=data_h_total+(size*sizeof(NUM_ADD)+127)/128*128; //.thus we donot need to worry about align int data_size=0; char * data_d_total; cudaMalloc( (char **) &data_d_total, (size*sizeof(NUM_ADD)+127)/128*128+size *( 640 )* sizeof (char)*2+sizeof(int)*size*4); int * result_h=(int *) malloc(sizeof(int)*size*4); char * cigar_h=(char *) malloc(sizeof(char)*size*128); int * cigar_int_h=(int *) malloc(sizeof(int)*size*128); for(int i=0;i<size;i++) { char4 reference_tep[150]; int read_len=strlen(inputdata[i].read_base); int ref_len=strlen(inputdata[i].reference_base); total_size+=read_len*ref_len; int new_len=(ref_len+4-1)/4; for(int j=0;j<new_len;j++) { reference_tep[j].x=inputdata[i].reference_base[j*4]; if(j*4+1<ref_len) reference_tep[j].y=inputdata[i].reference_base[j*4+1]; if(j*4+2<ref_len) reference_tep[j].z=inputdata[i].reference_base[j*4+2]; if(j*4+3<ref_len) reference_tep[j].w=inputdata[i].reference_base[j*4+3]; } data_num_add[i].read_reference_number.x=read_len; data_num_add[i].read_reference_number.y=ref_len; data_num_add[i].address_array=data_size; memcpy(data_h,inputdata[i].read_base,read_len); data_h+=(read_len+128-1)/128*128; data_size+=(read_len+128-1)/128*128; memcpy(data_h,reference_tep,sizeof(char4)* new_len); data_h+=(new_len*sizeof(char4)+127)/128*128; data_size+=(new_len*sizeof(char4)+127)/128*128; } int data_size_to_copy=data_size+(size*sizeof(NUM_ADD)+127)/128*128; cudaMemcpy(data_d_total,data_h_total,data_size_to_copy,cudaMemcpyHostToDevice); NUM_ADD * num_add_d=(NUM_ADD *) (data_d_total); char * data_d=data_d_total+(size*sizeof(NUM_ADD)+127)/128*128; int4 * result_d=(int4 *) (data_d_total+data_size_to_copy); char * cigar; cudaMalloc( (char **) &cigar, size * (128* sizeof (char)+128*sizeof(int))); int * cigar_int=(int *) (cigar+size*128*sizeof(char)); int * direction; cudaMalloc( (int **) & direction, size * (640*1100* sizeof (int))); dim3 block(448); dim3 grid(size); clock_gettime(CLOCK_MONOTONIC_RAW,&start); calculate_cigar<<<grid,block>>> (size,data_d,num_add_d,result_d,direction); //result // calculate_cigar_2<<<grid,block>>> (size,result_d,cigar,cigar_int,direction); //result cudaMemcpy(result_h,result_d,size*sizeof(int)*4,cudaMemcpyDeviceToHost); cudaMemcpy(cigar_h,cigar,128*sizeof(char)*size, cudaMemcpyDeviceToHost); cudaMemcpy(cigar_int_h,cigar_int,128*sizeof(int)*size,cudaMemcpyDeviceToHost); clock_gettime(CLOCK_MONOTONIC_RAW,&finish); computation_time+=diff(start,finish); /* for(int i=0;i<size;i++) { printf("%d\n",result_h[i*4+1]); printf("["); for(int j=0;j<result_h[i*4+3];j++) { if(j!=0) printf(", "); printf("%d%c",cigar_int_h[128*i+j],cigar_h[128*i+j]); } printf("]\n"); } */ cudaFree(direction); free(data_h_total); cudaFree(data_d_total); free(inputdata); cudaFree(cigar); free(cigar_int_h); free(cigar_h); // fscanf(file,"%d",&size); } printf(" computation_time= %e %d GCUPs=%lf\n",computation_time,total_size,( (double)total_size)/computation_time/1000000000); return 0; }
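// [Editorial sketch - not part of the file above.] The kernel scores a Gotoh-style
// affine-gap alignment (match=200, mismatch=-150, gap open=-260, gap extend=-11),
// sweeping anti-diagonals with one read base per thread and folding the gap states
// into its MM/gap_h/gap_v bookkeeping. As a readable reference, here is the textbook
// three-state per-cell update with the same scoring; CellSketch/cellUpdateSketch are
// illustrative names, and this is not the kernel's exact data layout:
struct CellSketch { int M, I, D; };   // best score ending in (mis)match, insertion, deletion

static CellSketch cellUpdateSketch(CellSketch diag, CellSketch up, CellSketch left,
                                   char readBase, char refBase) {
    const int match = 200, mismatch = -150, open = -260, extend = -11;
    CellSketch out;
    int bestDiag = diag.M > diag.I ? diag.M : diag.I;
    bestDiag = bestDiag > diag.D ? bestDiag : diag.D;
    out.M = bestDiag + (readBase == refBase ? match : mismatch);
    // A gap either opens from the neighbouring match state or extends an existing gap.
    out.I = (left.M + open > left.I + extend) ? left.M + open : left.I + extend;
    out.D = (up.M   + open > up.D   + extend) ? up.M   + open : up.D   + extend;
    return out;
}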
5a31efc0cc5b2ad5bb6b1dab21a19e3f51c6f593.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #define DLIMIT 99999999 // Cluster Center // // float* f; // vector of size #channels // float x, y, z; // #define __min(a, b) (((a) < (b)) ? (a) : (b)) #define __max(a, b) (((a) > (b)) ? (a) : (b)) /* * P = point * S = data shape * F = data # features */ __device__ float at(const float* data, const int4& P, const int3& S) { long s2d = S.y * S.x, s3d = S.z * S.y * S.x; return data[P.w * s3d + P.z * s2d + P.y * S.x + P.x]; } __device__ float gradient(const float* data, int4& P, const int3& S, int nf) { float d; float3 diff; int4 q; q.z = P.z; q.y = P.y; q.x = P.x; for ( int k = 0; k < nf; k++ ) { q.w = P.w = k; q.x = P.x + 1; d = at(data, P, S) - at(data, q, S); diff.x += d * d; q.x = P.x; q.y = P.y + 1; d = at(data, P, S) - at(data, q, S); diff.y += d * d; q.y = P.y; q.z = P.z + 1; d = at(data, P, S) - at(data, q, S); diff.z += d * d; } return diff.x + diff.y + diff.z; } __global__ void init_clusters(const float* data, float* centers, int n_clusters, int n_features, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { long lidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( lidx >= n_clusters ) { return; } int3 idx; int plane = sp_grid.y * sp_grid.x; int aux = lidx % plane; idx.z = lidx / plane; idx.y = aux / sp_grid.x; idx.x = aux % sp_grid.x; int4 p, q, r; p.z = r.z = idx.z * sp_shape.z + sp_shape.z / 2; p.y = r.y = idx.y * sp_shape.y + sp_shape.y / 2; p.z = r.x = idx.x * sp_shape.x + sp_shape.x / 2; float g, min_g = DLIMIT; for ( int u = -3; u <= 3; u++ ) { q.z = p.z + u; if ( q.z < 0 || q.z >= im_shape.z - 2 ) {continue;} for ( int v = -3; v <= 3; v ++ ) { q.y = p.y + v; if ( q.y < 0 || q.y >= im_shape.y - 2 ) {continue;} for ( int w = -3; w <= 3; w++ ) { q.x = p.x + w; if ( q.x < 0 || q.x >= im_shape.x - 2 ) {continue;} g = gradient(data, q, im_shape, n_features); if ( g < min_g ) { min_g = g; r.z = q.z; r.y = q.y; r.x = q.x; } } } } int shift = n_features + 3; for ( int k = 0; k < n_features; k++ ) { r.w = k; centers[lidx * shift + k] = at(data, r, im_shape); } centers[lidx * shift + n_features + 0] = r.z; centers[lidx * shift + n_features + 1] = r.y; centers[lidx * shift + n_features + 2] = r.x; } __global__ void expectation(const float* data, const float* centers, unsigned int* labels, float m, float S, int n_clusters, int n_features, const float3 spacing, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { int4 idx, p, q; long gidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( gidx >= im_shape.x * im_shape.y * im_shape.z ) { return; } int plane = im_shape.y * im_shape.x; int aux = gidx % plane; idx.z = gidx / plane; idx.y = aux / im_shape.x; idx.x = aux % im_shape.x; p.z = __max(0, __min(idx.z / sp_shape.z, sp_grid.z - 1)); p.y = __max(0, __min(idx.y / sp_shape.y, sp_grid.y - 1)); p.x = __max(0, __min(idx.x / sp_shape.x, sp_grid.x - 1)); float min_d = DLIMIT, d, dist, adiff, pdiff; int R = 2, cshift = n_features + 3; long cidx, ridx = 0; for ( int k = -R; k <= R; k++ ) { q.z = p.z + k; if ( q.z < 0 || q.z >= sp_grid.z ) {continue;} for ( int i = -R; i <= R; i++ ) { q.y = p.y + i; if ( q.y < 0 || q.y >= sp_grid.y ) {continue;} for ( int j = -R; j <= R; j++ ) { q.x = p.x + j; if ( q.x < 0 || q.x >= sp_grid.x ) {continue;} cidx = q.z * sp_grid.y * sp_grid.x + q.y * sp_grid.x + q.x; if ( centers[cidx * cshift] == DLIMIT ) { continue; } // Appearance diff adiff = 0; for ( int w = 0; w < n_features; w++ ) { idx.w = w; d = at(data, idx, im_shape) - 
centers[cidx * cshift + w]; adiff += d * d; } // Position diff float3 pd; pd.z = (idx.z - centers[cidx * cshift + n_features + 0]) * spacing.z; pd.y = (idx.y - centers[cidx * cshift + n_features + 1]) * spacing.y; pd.x = (idx.x - centers[cidx * cshift + n_features + 2]) * spacing.x; pdiff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x; dist = adiff / (m * m * n_features * n_features) + pdiff / (S * S); // Wrapup if ( dist < min_d ) { min_d = dist; ridx = cidx; } } } } labels[gidx] = ridx + 1; } __global__ void maximization(const float* data, const unsigned int* labels, float* centers, int n_clusters, int n_features, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { long lidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( lidx >= n_clusters ) { return; } long cshift = n_features + 3; int3 cidx; cidx.z = (int) centers[lidx * cshift + n_features + 0]; cidx.y = (int) centers[lidx * cshift + n_features + 1]; cidx.x = (int) centers[lidx * cshift + n_features + 2]; float ratio = 2.0f; int3 from; from.z = __max(cidx.z - sp_shape.z * ratio, 0); from.y = __max(cidx.y - sp_shape.y * ratio, 0); from.x = __max(cidx.x - sp_shape.x * ratio, 0); int3 to; to.z = __min(cidx.z + sp_shape.z * ratio, im_shape.z); to.y = __min(cidx.y + sp_shape.y * ratio, im_shape.y); to.x = __min(cidx.x + sp_shape.x * ratio, im_shape.x); int4 p; float* f = new float[cshift]; for ( int k = 0; k < cshift; k++ ) {f[k] = 0;} long count = 0, offset, s2d = im_shape.x * im_shape.y; for ( p.z = from.z; p.z < to.z; p.z++ ) { for ( p.y = from.y; p.y < to.y; p.y++ ) { for ( p.x = from.x; p.x < to.x; p.x++ ) { offset = p.z * s2d + p.y * im_shape.x + p.x; if ( labels[offset] == lidx + 1 ) { for ( int w = 0; w < n_features; w++ ) { p.w = w; f[w] += at(data, p, im_shape); } f[n_features + 0] += p.z; f[n_features + 1] += p.y; f[n_features + 2] += p.x; count += 1; } } } } if ( count > 0 ) { for ( int w = 0; w < cshift; w++ ) { centers[lidx * cshift + w] = f[w] / count; } } else { centers[lidx * cshift] = DLIMIT; } delete[] f; }
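// [Editorial sketch - not part of the file above.] All three kernels address voxels
// and superpixels through a flattened 1D index that is unpacked into (z, y, x) with a
// divide/modulo pair against the slice size. A minimal sketch of that unpacking and
// its inverse (Index3Sketch and the helper names are illustrative):
struct Index3Sketch { int z, y, x; };

__host__ __device__ static Index3Sketch unflattenSketch(long gidx, int3 shape) {
    int plane = shape.y * shape.x;          // elements per z-slice
    int aux = (int)(gidx % plane);
    Index3Sketch out;
    out.z = (int)(gidx / plane);
    out.y = aux / shape.x;
    out.x = aux % shape.x;
    return out;
}

__host__ __device__ static long flattenSketch(Index3Sketch p, int3 shape) {
    return (long)p.z * shape.y * shape.x + (long)p.y * shape.x + p.x;
}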
5a31efc0cc5b2ad5bb6b1dab21a19e3f51c6f593.cu
#include <cstdio> #define DLIMIT 99999999 // Cluster Center // // float* f; // vector of size #channels // float x, y, z; // #define __min(a, b) (((a) < (b)) ? (a) : (b)) #define __max(a, b) (((a) > (b)) ? (a) : (b)) /* * P = point * S = data shape * F = data # features */ __device__ float at(const float* data, const int4& P, const int3& S) { long s2d = S.y * S.x, s3d = S.z * S.y * S.x; return data[P.w * s3d + P.z * s2d + P.y * S.x + P.x]; } __device__ float gradient(const float* data, int4& P, const int3& S, int nf) { float d; float3 diff; int4 q; q.z = P.z; q.y = P.y; q.x = P.x; for ( int k = 0; k < nf; k++ ) { q.w = P.w = k; q.x = P.x + 1; d = at(data, P, S) - at(data, q, S); diff.x += d * d; q.x = P.x; q.y = P.y + 1; d = at(data, P, S) - at(data, q, S); diff.y += d * d; q.y = P.y; q.z = P.z + 1; d = at(data, P, S) - at(data, q, S); diff.z += d * d; } return diff.x + diff.y + diff.z; } __global__ void init_clusters(const float* data, float* centers, int n_clusters, int n_features, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { long lidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( lidx >= n_clusters ) { return; } int3 idx; int plane = sp_grid.y * sp_grid.x; int aux = lidx % plane; idx.z = lidx / plane; idx.y = aux / sp_grid.x; idx.x = aux % sp_grid.x; int4 p, q, r; p.z = r.z = idx.z * sp_shape.z + sp_shape.z / 2; p.y = r.y = idx.y * sp_shape.y + sp_shape.y / 2; p.z = r.x = idx.x * sp_shape.x + sp_shape.x / 2; float g, min_g = DLIMIT; for ( int u = -3; u <= 3; u++ ) { q.z = p.z + u; if ( q.z < 0 || q.z >= im_shape.z - 2 ) {continue;} for ( int v = -3; v <= 3; v ++ ) { q.y = p.y + v; if ( q.y < 0 || q.y >= im_shape.y - 2 ) {continue;} for ( int w = -3; w <= 3; w++ ) { q.x = p.x + w; if ( q.x < 0 || q.x >= im_shape.x - 2 ) {continue;} g = gradient(data, q, im_shape, n_features); if ( g < min_g ) { min_g = g; r.z = q.z; r.y = q.y; r.x = q.x; } } } } int shift = n_features + 3; for ( int k = 0; k < n_features; k++ ) { r.w = k; centers[lidx * shift + k] = at(data, r, im_shape); } centers[lidx * shift + n_features + 0] = r.z; centers[lidx * shift + n_features + 1] = r.y; centers[lidx * shift + n_features + 2] = r.x; } __global__ void expectation(const float* data, const float* centers, unsigned int* labels, float m, float S, int n_clusters, int n_features, const float3 spacing, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { int4 idx, p, q; long gidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( gidx >= im_shape.x * im_shape.y * im_shape.z ) { return; } int plane = im_shape.y * im_shape.x; int aux = gidx % plane; idx.z = gidx / plane; idx.y = aux / im_shape.x; idx.x = aux % im_shape.x; p.z = __max(0, __min(idx.z / sp_shape.z, sp_grid.z - 1)); p.y = __max(0, __min(idx.y / sp_shape.y, sp_grid.y - 1)); p.x = __max(0, __min(idx.x / sp_shape.x, sp_grid.x - 1)); float min_d = DLIMIT, d, dist, adiff, pdiff; int R = 2, cshift = n_features + 3; long cidx, ridx = 0; for ( int k = -R; k <= R; k++ ) { q.z = p.z + k; if ( q.z < 0 || q.z >= sp_grid.z ) {continue;} for ( int i = -R; i <= R; i++ ) { q.y = p.y + i; if ( q.y < 0 || q.y >= sp_grid.y ) {continue;} for ( int j = -R; j <= R; j++ ) { q.x = p.x + j; if ( q.x < 0 || q.x >= sp_grid.x ) {continue;} cidx = q.z * sp_grid.y * sp_grid.x + q.y * sp_grid.x + q.x; if ( centers[cidx * cshift] == DLIMIT ) { continue; } // Appearance diff adiff = 0; for ( int w = 0; w < n_features; w++ ) { idx.w = w; d = at(data, idx, im_shape) - centers[cidx * cshift + w]; adiff += d * d; } // Position diff float3 pd; pd.z = (idx.z - 
centers[cidx * cshift + n_features + 0]) * spacing.z; pd.y = (idx.y - centers[cidx * cshift + n_features + 1]) * spacing.y; pd.x = (idx.x - centers[cidx * cshift + n_features + 2]) * spacing.x; pdiff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x; dist = adiff / (m * m * n_features * n_features) + pdiff / (S * S); // Wrapup if ( dist < min_d ) { min_d = dist; ridx = cidx; } } } } labels[gidx] = ridx + 1; } __global__ void maximization(const float* data, const unsigned int* labels, float* centers, int n_clusters, int n_features, const int3 sp_grid, const int3 sp_shape, const int3 im_shape) { long lidx = threadIdx.x + (blockIdx.x * blockDim.x); if ( lidx >= n_clusters ) { return; } long cshift = n_features + 3; int3 cidx; cidx.z = (int) centers[lidx * cshift + n_features + 0]; cidx.y = (int) centers[lidx * cshift + n_features + 1]; cidx.x = (int) centers[lidx * cshift + n_features + 2]; float ratio = 2.0f; int3 from; from.z = __max(cidx.z - sp_shape.z * ratio, 0); from.y = __max(cidx.y - sp_shape.y * ratio, 0); from.x = __max(cidx.x - sp_shape.x * ratio, 0); int3 to; to.z = __min(cidx.z + sp_shape.z * ratio, im_shape.z); to.y = __min(cidx.y + sp_shape.y * ratio, im_shape.y); to.x = __min(cidx.x + sp_shape.x * ratio, im_shape.x); int4 p; float* f = new float[cshift]; for ( int k = 0; k < cshift; k++ ) {f[k] = 0;} long count = 0, offset, s2d = im_shape.x * im_shape.y; for ( p.z = from.z; p.z < to.z; p.z++ ) { for ( p.y = from.y; p.y < to.y; p.y++ ) { for ( p.x = from.x; p.x < to.x; p.x++ ) { offset = p.z * s2d + p.y * im_shape.x + p.x; if ( labels[offset] == lidx + 1 ) { for ( int w = 0; w < n_features; w++ ) { p.w = w; f[w] += at(data, p, im_shape); } f[n_features + 0] += p.z; f[n_features + 1] += p.y; f[n_features + 2] += p.x; count += 1; } } } } if ( count > 0 ) { for ( int w = 0; w < cshift; w++ ) { centers[lidx * cshift + w] = f[w] / count; } } else { centers[lidx * cshift] = DLIMIT; } delete[] f; }
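// [Editorial sketch - not part of the file above.] The host-side driver that sequences
// these kernels is not included in this pair. A hedged sketch of the usual SLIC-style
// loop (initialize centers once, then alternate assignment and center updates); the
// block size, iteration count and slicDriverSketch name are illustrative assumptions:
static void slicDriverSketch(const float *d_data, float *d_centers, unsigned int *d_labels,
                             int n_clusters, int n_features, float m, float S,
                             float3 spacing, int3 sp_grid, int3 sp_shape, int3 im_shape,
                             int iterations) {
    int threads = 256;
    long n_voxels = (long)im_shape.x * im_shape.y * im_shape.z;
    int clusterBlocks = (int)((n_clusters + threads - 1) / threads);
    int voxelBlocks   = (int)((n_voxels + threads - 1) / threads);

    init_clusters<<<clusterBlocks, threads>>>(d_data, d_centers, n_clusters, n_features,
                                              sp_grid, sp_shape, im_shape);
    for (int it = 0; it < iterations; ++it) {
        expectation<<<voxelBlocks, threads>>>(d_data, d_centers, d_labels, m, S,
                                              n_clusters, n_features, spacing,
                                              sp_grid, sp_shape, im_shape);
        maximization<<<clusterBlocks, threads>>>(d_data, d_labels, d_centers,
                                                 n_clusters, n_features,
                                                 sp_grid, sp_shape, im_shape);
    }
    cudaDeviceSynchronize();   // make sure the final labels/centers are ready to copy back
}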
5395a121777ddde8b587fa63e94c23c1fed2d144.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

template <typename T, typename C, typename U>
__global__ void
awkward_ListArray_num(T* tonum, const C* fromstarts, const U* fromstops, int64_t length, uint64_t invocation_index, uint64_t* err_code) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (err_code[0] == NO_ERROR) {
    if (thread_id < length) {
      int64_t start = fromstarts[thread_id];
      int64_t stop = fromstops[thread_id];
      tonum[thread_id] = (C)(stop - start);
    }
  }
}
5395a121777ddde8b587fa63e94c23c1fed2d144.cu
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

template <typename T, typename C, typename U>
__global__ void
awkward_ListArray_num(T* tonum, const C* fromstarts, const U* fromstops, int64_t length, uint64_t invocation_index, uint64_t* err_code) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (err_code[0] == NO_ERROR) {
    if (thread_id < length) {
      int64_t start = fromstarts[thread_id];
      int64_t stop = fromstops[thread_id];
      tonum[thread_id] = (C)(stop - start);
    }
  }
}
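// [Editorial sketch - not part of the file above.] NO_ERROR and the launch machinery
// come from the surrounding Awkward Array GPU kernel framework, not from this file.
// A hedged, direct-driving sketch (the helper name, block size, and the assumption
// that 0 stands for NO_ERROR are illustrative):
static void listArrayNumSketch(int64_t *d_tonum, const int64_t *d_starts,
                               const int64_t *d_stops, int64_t length) {
    uint64_t *d_err;
    cudaMalloc(&d_err, sizeof(uint64_t));
    cudaMemset(d_err, 0, sizeof(uint64_t));        // assumed NO_ERROR sentinel
    int threads = 256;
    int blocks = (int)((length + threads - 1) / threads);
    awkward_ListArray_num<int64_t, int64_t, int64_t><<<blocks, threads>>>(
        d_tonum, d_starts, d_stops, length, /*invocation_index=*/0, d_err);
    cudaDeviceSynchronize();                       // surface launch/runtime errors here
    cudaFree(d_err);
}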
9f077d9b65d6feeeed35ab8b0e40a94b0ae2c096.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace cmprlb { __global__ void kernel0( int n, real* r, const real* g) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= n) return; r[i] = -g[i]; } template<int bsize> __global__ void kernel1( int nfree, const int* index, const int col, const int head, const int m, const int iPitch, const real* wa, const real* wy, const real* ws, const real theta, const real* z, const real* x, const real* g, real* r ) { const int i = blockIdx.x * blockDim.y + threadIdx.y; const int tidx = threadIdx.x; //8 const int tidy = threadIdx.y; //64 volatile __shared__ real sdata[(512 / bsize)][bsize+1]; __shared__ real a[2][bsize+1]; real mySum; if(tidy == 0 && tidx < col) { a[0][tidx] = wa[tidx]; a[1][tidx] = theta * wa[col + tidx]; } int k = 0; if(i < nfree && tidx < col) { const int pointr = Modular((head + tidx), m); k = index[i]; __syncthreads(); mySum = wy[k * iPitch + pointr] * a[0][tidx] + ws[k * iPitch + pointr] * a[1][tidx]; } else mySum = 0; if(bsize > 1) { volatile real* smem = sdata[tidy] + tidx; *smem = mySum; __syncthreads(); if(bsize > 4) {*smem = mySum = mySum + smem[4];} if(bsize > 2) {*smem = mySum = mySum + smem[2];} if(bsize > 1) {*smem = mySum = mySum + smem[1];} } if(tidx == 0 && i < nfree) { r[i] = -theta * (z[k] - x[k]) - g[k] + mySum; } } void prog0( const int n, real* r, const real* g, const hipStream_t& stream ) { hipLaunchKernelGGL(( kernel0), dim3(dim3(iDivUp(n, 512))), dim3(dim3(512)), 0, stream, n, r, g); } void prog1( const int nfree, const int* index, const int col, const int head, const int m, const int iPitch, const real* wa, const real* wy, const real* ws, const real theta, const real* z, const real* x, const real* g, real* r, const hipStream_t& stream ) { if(col > 4) { int nblocky = 512 / 8; hipLaunchKernelGGL(( kernel1<8>), dim3(dim3(iDivUp(nfree, nblocky))), dim3(dim3(8, nblocky)), 0, stream, nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col > 2) { int nblocky = 512 / 4; hipLaunchKernelGGL(( kernel1<4>), dim3(dim3(iDivUp(nfree, nblocky))), dim3(dim3(4, nblocky)), 0, stream, nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col > 1) { int nblocky = 512 / 2; hipLaunchKernelGGL(( kernel1<2>), dim3(dim3(iDivUp(nfree, nblocky))), dim3(dim3(2, nblocky)), 0, stream, nfree, 
index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col == 1){ int nblocky = 512 / 1; hipLaunchKernelGGL(( kernel1<1>), dim3(dim3(iDivUp(nfree, nblocky))), dim3(dim3(1, nblocky)), 0, stream, nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } } }; };
9f077d9b65d6feeeed35ab8b0e40a94b0ae2c096.cu
/************************************************************************* GPU Version: Tsinghua University, Aug. 2012. Written by Yun Fei in collaboration with W. Wang and B. Wang Original: Optimization Technology Center. Argonne National Laboratory and Northwestern University. Written by Ciyou Zhu in collaboration with R.H. Byrd, P. Lu-Chen and J. Nocedal. Contributors: * Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to pseudocode. This software is freely available, but we expect that all publications describing work using this software, or all commercial products using it, quote at least one of the references given below: * R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound Constrained Optimization, (1995), SIAM Journal on Scientific and Statistical Computing , 16, 5, pp. 1190-1208. * C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B, FORTRAN routines for large scale bound constrained optimization (1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4, pp. 550 - 560. *************************************************************************/ #include "lbfgsbcuda.h" namespace lbfgsbcuda { namespace cmprlb { __global__ void kernel0( int n, real* r, const real* g) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if(i >= n) return; r[i] = -g[i]; } template<int bsize> __global__ void kernel1( int nfree, const int* index, const int col, const int head, const int m, const int iPitch, const real* wa, const real* wy, const real* ws, const real theta, const real* z, const real* x, const real* g, real* r ) { const int i = blockIdx.x * blockDim.y + threadIdx.y; const int tidx = threadIdx.x; //8 const int tidy = threadIdx.y; //64 volatile __shared__ real sdata[(512 / bsize)][bsize+1]; __shared__ real a[2][bsize+1]; real mySum; if(tidy == 0 && tidx < col) { a[0][tidx] = wa[tidx]; a[1][tidx] = theta * wa[col + tidx]; } int k = 0; if(i < nfree && tidx < col) { const int pointr = Modular((head + tidx), m); k = index[i]; __syncthreads(); mySum = wy[k * iPitch + pointr] * a[0][tidx] + ws[k * iPitch + pointr] * a[1][tidx]; } else mySum = 0; if(bsize > 1) { volatile real* smem = sdata[tidy] + tidx; *smem = mySum; __syncthreads(); if(bsize > 4) {*smem = mySum = mySum + smem[4];} if(bsize > 2) {*smem = mySum = mySum + smem[2];} if(bsize > 1) {*smem = mySum = mySum + smem[1];} } if(tidx == 0 && i < nfree) { r[i] = -theta * (z[k] - x[k]) - g[k] + mySum; } } void prog0( const int n, real* r, const real* g, const cudaStream_t& stream ) { kernel0<<<dim3(iDivUp(n, 512)), dim3(512), 0, stream>>> (n, r, g); } void prog1( const int nfree, const int* index, const int col, const int head, const int m, const int iPitch, const real* wa, const real* wy, const real* ws, const real theta, const real* z, const real* x, const real* g, real* r, const cudaStream_t& stream ) { if(col > 4) { int nblocky = 512 / 8; kernel1<8><<<dim3(iDivUp(nfree, nblocky)), dim3(8, nblocky), 0, stream>>> (nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col > 2) { int nblocky = 512 / 4; kernel1<4><<<dim3(iDivUp(nfree, nblocky)), dim3(4, nblocky), 0, stream>>> (nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col > 1) { int nblocky = 512 / 2; kernel1<2><<<dim3(iDivUp(nfree, nblocky)), dim3(2, nblocky), 0, stream>>> (nfree, index, col, head, m, iPitch, wa, wy, ws, theta, z, x, g, r); } else if(col == 1){ int nblocky = 512 / 1; kernel1<1><<<dim3(iDivUp(nfree, nblocky)), dim3(1, nblocky), 0, stream>>> (nfree, index, col, head, 
m, iPitch, wa, wy, ws, theta, z, x, g, r); } } }; };
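The tidx-reduction in kernel1 above folds up to eight partial dot products per row using warp-synchronous volatile shared memory. Below is a minimal standalone sketch of the same reduce-across-threadIdx.x pattern, written with warp shuffles instead of shared memory; the kernel name, the toy main(), and the data are illustrative only and are not part of lbfgsbcuda.

#include <cuda_runtime.h>
#include <cstdio>
#include <vector>

// Each row of a [rows x width] matrix is reduced by `width` threads in the x
// dimension, mirroring the tidx reduction in kernel1. width must be a power
// of two <= 32 so the reduction stays inside one warp segment.
template <int width>
__global__ void row_sum_kernel(const float* in, float* out, int rows)
{
    const int row  = blockIdx.x * blockDim.y + threadIdx.y;
    const int lane = threadIdx.x;                       // 0 .. width-1, like tidx above

    // Inactive rows contribute zero so the whole warp can shuffle without divergence.
    float v = (row < rows) ? in[row * width + lane] : 0.0f;

    for (int offset = width / 2; offset > 0; offset /= 2)
        v += __shfl_down_sync(0xFFFFFFFFu, v, offset, width);

    if (row < rows && lane == 0) out[row] = v;          // lane 0 holds the row sum
}

int main()
{
    const int rows = 1000, width = 8;
    float *d_in, *d_out;
    cudaMalloc(&d_in,  rows * width * sizeof(float));
    cudaMalloc(&d_out, rows * sizeof(float));

    // Fill the input with ones so every row should sum to `width`.
    std::vector<float> h_in(rows * width, 1.0f);
    cudaMemcpy(d_in, h_in.data(), h_in.size() * sizeof(float), cudaMemcpyHostToDevice);

    dim3 block(width, 64);                              // 8 x 64 = 512 threads, as in prog1
    dim3 grid((rows + block.y - 1) / block.y);
    row_sum_kernel<width><<<grid, block>>>(d_in, d_out, rows);

    float first;
    cudaMemcpy(&first, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("row 0 sum = %f (expected %d)\n", first, width);

    cudaFree(d_in); cudaFree(d_out);
    return 0;
}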
cf65d93c084e1c01baabbef0979c25b2791c49d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <db/db_operators.cuh> #include <utilities/error.hpp> #include <raft/cudart_utils.h> #include <hipcub/hipcub.hpp> namespace cugraph { namespace db { template <typename IndexType> struct degree_iterator { IndexType* offsets; degree_iterator(IndexType* _offsets) : offsets(_offsets) {} __host__ __device__ IndexType operator[](IndexType place) { return offsets[place + 1] - offsets[place]; } }; template <typename It, typename IndexType> struct deref_functor { It iterator; deref_functor(It it) : iterator(it) {} __host__ __device__ IndexType operator()(IndexType in) { return iterator[in]; } }; template <typename idx_t, typename flag_t> struct notNegativeOne { __host__ __device__ flag_t operator()(idx_t in) { return in != -1; } }; template <typename IndexType> __device__ IndexType binsearch_maxle(const IndexType* vec, const IndexType val, IndexType low, IndexType high) { while (true) { if (low == high) return low; // we know it exists if ((low + 1) == high) return (vec[high] <= val) ? high : low; IndexType mid = low + (high - low) / 2; if (vec[mid] > val) high = mid - 1; else low = mid; } } template <typename IndexType> __global__ void compute_bucket_offsets_kernel(const IndexType* frontier_degrees_exclusive_sum, IndexType* bucket_offsets, const IndexType frontier_size, IndexType total_degree) { IndexType end = ((total_degree - 1 + FIND_MATCHES_BLOCK_SIZE) / FIND_MATCHES_BLOCK_SIZE); for (IndexType bid = blockIdx.x * blockDim.x + threadIdx.x; bid <= end; bid += gridDim.x * blockDim.x) { IndexType eid = min(bid * FIND_MATCHES_BLOCK_SIZE, total_degree - 1); bucket_offsets[bid] = binsearch_maxle(frontier_degrees_exclusive_sum, eid, (IndexType)0, frontier_size - 1); } } template <typename idx_t> __global__ void findMatchesKernel(idx_t inputSize, idx_t outputSize, idx_t maxBlock, idx_t* offsets, idx_t* indirection, idx_t* blockStarts, idx_t* expandCounts, idx_t* frontier, idx_t* columnA, idx_t* columnB, idx_t* columnC, idx_t* outputA, idx_t* outputB, idx_t* outputC, idx_t* outputD, idx_t patternA, idx_t patternB, idx_t patternC) { __shared__ idx_t blockRange[2]; __shared__ idx_t localExSum[FIND_MATCHES_BLOCK_SIZE * 2]; __shared__ idx_t localFrontier[FIND_MATCHES_BLOCK_SIZE * 2]; for (idx_t bid = blockIdx.x; bid < maxBlock; bid += gridDim.x) { // Copy in the block's section of the expand counts if (threadIdx.x == 0) { blockRange[0] = blockStarts[bid]; blockRange[1] = blockStarts[bid + 1]; if (blockRange[0] > 0) { blockRange[0] -= 1; } } __syncthreads(); idx_t sectionSize = blockRange[1] - blockRange[0]; for (int tid = threadIdx.x; tid <= sectionSize; tid += blockDim.x) { localExSum[tid] = expandCounts[blockRange[0] + tid]; localFrontier[tid] = frontier[blockRange[0] + tid]; } __syncthreads(); // Do the work item for each thread of this virtual block: idx_t tid = bid * blockDim.x + threadIdx.x; if (tid < outputSize) { // 
Figure out which row this thread/iteration is working on idx_t sourceIdx = binsearch_maxle(localExSum, tid, (idx_t)0, (idx_t)sectionSize); idx_t source = localFrontier[sourceIdx]; idx_t rank = tid - localExSum[sourceIdx]; idx_t row_id = indirection[offsets[source] + rank]; // Load in values from the row for A, B, and C columns idx_t valA = columnA[row_id]; idx_t valB = columnB[row_id]; idx_t valC = columnC[row_id]; // Compare the row values with constants in the pattern bool matchA = outputA != nullptr ? true : patternA == valA; bool matchB = outputB != nullptr ? true : patternB == valB; bool matchC = outputC != nullptr ? true : patternC == valC; // If row doesn't match, set row values to -1 before writing out if (!(matchA && matchB && matchC)) { valA = -1; valB = -1; valC = -1; row_id = -1; } // Write out values to non-null outputs if (outputA != nullptr) outputA[tid] = valA; if (outputB != nullptr) outputB[tid] = valB; if (outputC != nullptr) outputC[tid] = valC; if (outputD != nullptr) outputD[tid] = row_id; } } } template <typename idx_t> db_result<idx_t> findMatches(db_pattern<idx_t>& pattern, db_table<idx_t>& table, idx_t* frontier, idx_t frontier_size, int indexPosition) { // Find out if the indexPosition is a variable or constant bool indexConstant = !pattern.getEntry(indexPosition).isVariable(); db_column_index<idx_t>& theIndex = table.getIndex(indexPosition); // Check to see whether we are going to be saving out the row ids from matches bool saveRowIds = false; if (pattern.getSize() == 4) saveRowIds = true; // Check if we have a frontier to use, if we don't make one up bool givenInputFrontier = frontier != nullptr; idx_t frontierSize; idx_t* frontier_ptr = nullptr; rmm::device_buffer frontierBuffer; if (givenInputFrontier) { frontier_ptr = frontier; frontierSize = frontier_size; } else { if (indexConstant) { // Use a single value equal to the constant in the pattern idx_t constantValue = pattern.getEntry(indexPosition).getConstant(); frontierBuffer.resize(sizeof(idx_t)); thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(frontierBuffer.data()), reinterpret_cast<idx_t*>(frontierBuffer.data()) + 1, constantValue); frontier_ptr = reinterpret_cast<idx_t*>(frontierBuffer.data()); frontierSize = 1; } else { // Making a sequence of values from zero to n where n is the highest ID present in the index. 
idx_t highestId = theIndex.getOffsetsSize() - 2; frontierBuffer.resize(sizeof(idx_t) * (highestId + 1)); thrust::sequence(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(frontierBuffer.data()), reinterpret_cast<idx_t*>(frontierBuffer.data()) + highestId + 1); frontier_ptr = reinterpret_cast<idx_t*>(frontierBuffer.data()); frontierSize = highestId + 1; } } // Collect all the pointers needed to run the main kernel idx_t* columnA = table.getColumn(0); idx_t* columnB = table.getColumn(1); idx_t* columnC = table.getColumn(2); idx_t* offsets = theIndex.getOffsets(); idx_t* indirection = theIndex.getIndirection(); // Load balance the input rmm::device_buffer exsum_degree(sizeof(idx_t) * (frontierSize + 1)); degree_iterator<idx_t> deg_it(offsets); deref_functor<degree_iterator<idx_t>, idx_t> deref(deg_it); thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(exsum_degree.data()), reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, 0); thrust::transform(rmm::exec_policy(nullptr)->on(nullptr), frontier_ptr, frontier_ptr + frontierSize, reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, deref); thrust::inclusive_scan(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, reinterpret_cast<idx_t*>(exsum_degree.data()) + frontierSize + 1, reinterpret_cast<idx_t*>(exsum_degree.data()) + 1); idx_t output_size; CUDA_TRY(hipMemcpy(&output_size, reinterpret_cast<idx_t*>(exsum_degree.data()) + frontierSize, sizeof(idx_t), hipMemcpyDefault)); idx_t num_blocks = (output_size + FIND_MATCHES_BLOCK_SIZE - 1) / FIND_MATCHES_BLOCK_SIZE; rmm::device_buffer block_bucket_offsets(sizeof(idx_t) * (num_blocks + 1)); dim3 grid, block; block.x = 512; grid.x = min((idx_t)MAXBLOCKS, (num_blocks / 512) + 1); hipLaunchKernelGGL(( compute_bucket_offsets_kernel), dim3(grid), dim3(block), 0, nullptr, reinterpret_cast<idx_t*>(exsum_degree.data()), reinterpret_cast<idx_t*>(block_bucket_offsets.data()), frontierSize, output_size); // Allocate space for the result idx_t* outputA = nullptr; idx_t* outputB = nullptr; idx_t* outputC = nullptr; idx_t* outputD = nullptr; rmm::device_buffer outputABuffer; rmm::device_buffer outputBBuffer; rmm::device_buffer outputCBuffer; rmm::device_buffer outputDBuffer; if (pattern.getEntry(0).isVariable()) { outputABuffer.resize(sizeof(idx_t) * output_size); outputA = reinterpret_cast<idx_t*>(outputABuffer.data()); } if (pattern.getEntry(1).isVariable()) { outputBBuffer.resize(sizeof(idx_t) * output_size); outputB = reinterpret_cast<idx_t*>(outputBBuffer.data()); } if (pattern.getEntry(2).isVariable()) { outputCBuffer.resize(sizeof(idx_t) * output_size); outputC = reinterpret_cast<idx_t*>(outputCBuffer.data()); } if (saveRowIds) { outputDBuffer.resize(sizeof(idx_t) * output_size); outputD = reinterpret_cast<idx_t*>(outputDBuffer.data()); } // Get the constant pattern entries from the pattern to pass into the main kernel idx_t patternA = -1; idx_t patternB = -1; idx_t patternC = -1; if (!pattern.getEntry(0).isVariable()) { patternA = pattern.getEntry(0).getConstant(); } if (!pattern.getEntry(1).isVariable()) { patternB = pattern.getEntry(1).getConstant(); } if (!pattern.getEntry(2).isVariable()) { patternC = pattern.getEntry(2).getConstant(); } // Call the main kernel block.x = FIND_MATCHES_BLOCK_SIZE; grid.x = min((idx_t)MAXBLOCKS, (output_size + (idx_t)FIND_MATCHES_BLOCK_SIZE - 1) / (idx_t)FIND_MATCHES_BLOCK_SIZE); hipLaunchKernelGGL(( findMatchesKernel), dim3(grid), dim3(block), 0, nullptr, frontierSize, output_size, 
num_blocks, offsets, indirection, reinterpret_cast<idx_t*>(block_bucket_offsets.data()), reinterpret_cast<idx_t*>(exsum_degree.data()), frontier_ptr, columnA, columnB, columnC, outputA, outputB, outputC, outputD, patternA, patternB, patternC); // Get the non-null output columns std::vector<idx_t*> columns; std::vector<std::string> names; if (outputA != nullptr) { columns.push_back(outputA); names.push_back(pattern.getEntry(0).getVariable()); } if (outputB != nullptr) { columns.push_back(outputB); names.push_back(pattern.getEntry(1).getVariable()); } if (outputC != nullptr) { columns.push_back(outputC); names.push_back(pattern.getEntry(2).getVariable()); } if (outputD != nullptr) { columns.push_back(outputD); names.push_back(pattern.getEntry(3).getVariable()); } // Remove non-matches from result rmm::device_buffer flags(sizeof(int8_t) * output_size); idx_t* col_ptr = columns[0]; thrust::transform(rmm::exec_policy(nullptr)->on(nullptr), col_ptr, col_ptr + output_size, reinterpret_cast<int8_t*>(flags.data()), notNegativeOne<idx_t, int8_t>()); size_t tempSpaceSize = 0; rmm::device_buffer compactSize_d(sizeof(idx_t)); hipcub::DeviceSelect::Flagged(nullptr, tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); rmm::device_buffer tempSpace(tempSpaceSize); hipcub::DeviceSelect::Flagged(tempSpace.data(), tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); idx_t compactSize_h; hipMemcpy(&compactSize_h, compactSize_d.data(), sizeof(idx_t), hipMemcpyDefault); for (size_t i = 1; i < columns.size(); i++) { col_ptr = columns[i]; hipcub::DeviceSelect::Flagged(tempSpace.data(), tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); } // Put together the result to return db_result<idx_t> result; for (size_t i = 0; i < names.size(); i++) { result.addColumn(names[i]); } result.allocateColumns(compactSize_h); for (size_t i = 0; i < columns.size(); i++) { idx_t* outputPtr = result.getData(names[i]); idx_t* inputPtr = columns[i]; CUDA_TRY(hipMemcpy(outputPtr, inputPtr, sizeof(idx_t) * compactSize_h, hipMemcpyDefault)); } // Return the result return result; } template db_result<int32_t> findMatches(db_pattern<int32_t>& pattern, db_table<int32_t>& table, int32_t* frontier, int32_t frontier_size, int indexPosition); template db_result<int64_t> findMatches(db_pattern<int64_t>& pattern, db_table<int64_t>& table, int64_t* frontier, int64_t frontier_size, int indexPosition); } // namespace db } // namespace cugraph
cf65d93c084e1c01baabbef0979c25b2791c49d2.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <db/db_operators.cuh> #include <utilities/error.hpp> #include <raft/cudart_utils.h> #include <cub/device/device_select.cuh> namespace cugraph { namespace db { template <typename IndexType> struct degree_iterator { IndexType* offsets; degree_iterator(IndexType* _offsets) : offsets(_offsets) {} __host__ __device__ IndexType operator[](IndexType place) { return offsets[place + 1] - offsets[place]; } }; template <typename It, typename IndexType> struct deref_functor { It iterator; deref_functor(It it) : iterator(it) {} __host__ __device__ IndexType operator()(IndexType in) { return iterator[in]; } }; template <typename idx_t, typename flag_t> struct notNegativeOne { __host__ __device__ flag_t operator()(idx_t in) { return in != -1; } }; template <typename IndexType> __device__ IndexType binsearch_maxle(const IndexType* vec, const IndexType val, IndexType low, IndexType high) { while (true) { if (low == high) return low; // we know it exists if ((low + 1) == high) return (vec[high] <= val) ? high : low; IndexType mid = low + (high - low) / 2; if (vec[mid] > val) high = mid - 1; else low = mid; } } template <typename IndexType> __global__ void compute_bucket_offsets_kernel(const IndexType* frontier_degrees_exclusive_sum, IndexType* bucket_offsets, const IndexType frontier_size, IndexType total_degree) { IndexType end = ((total_degree - 1 + FIND_MATCHES_BLOCK_SIZE) / FIND_MATCHES_BLOCK_SIZE); for (IndexType bid = blockIdx.x * blockDim.x + threadIdx.x; bid <= end; bid += gridDim.x * blockDim.x) { IndexType eid = min(bid * FIND_MATCHES_BLOCK_SIZE, total_degree - 1); bucket_offsets[bid] = binsearch_maxle(frontier_degrees_exclusive_sum, eid, (IndexType)0, frontier_size - 1); } } template <typename idx_t> __global__ void findMatchesKernel(idx_t inputSize, idx_t outputSize, idx_t maxBlock, idx_t* offsets, idx_t* indirection, idx_t* blockStarts, idx_t* expandCounts, idx_t* frontier, idx_t* columnA, idx_t* columnB, idx_t* columnC, idx_t* outputA, idx_t* outputB, idx_t* outputC, idx_t* outputD, idx_t patternA, idx_t patternB, idx_t patternC) { __shared__ idx_t blockRange[2]; __shared__ idx_t localExSum[FIND_MATCHES_BLOCK_SIZE * 2]; __shared__ idx_t localFrontier[FIND_MATCHES_BLOCK_SIZE * 2]; for (idx_t bid = blockIdx.x; bid < maxBlock; bid += gridDim.x) { // Copy in the block's section of the expand counts if (threadIdx.x == 0) { blockRange[0] = blockStarts[bid]; blockRange[1] = blockStarts[bid + 1]; if (blockRange[0] > 0) { blockRange[0] -= 1; } } __syncthreads(); idx_t sectionSize = blockRange[1] - blockRange[0]; for (int tid = threadIdx.x; tid <= sectionSize; tid += blockDim.x) { localExSum[tid] = expandCounts[blockRange[0] + tid]; localFrontier[tid] = frontier[blockRange[0] + tid]; } __syncthreads(); // Do the work item for each thread of this virtual block: idx_t tid = bid * blockDim.x + threadIdx.x; if (tid < outputSize) { // Figure out which row this thread/iteration is working on idx_t sourceIdx = 
binsearch_maxle(localExSum, tid, (idx_t)0, (idx_t)sectionSize); idx_t source = localFrontier[sourceIdx]; idx_t rank = tid - localExSum[sourceIdx]; idx_t row_id = indirection[offsets[source] + rank]; // Load in values from the row for A, B, and C columns idx_t valA = columnA[row_id]; idx_t valB = columnB[row_id]; idx_t valC = columnC[row_id]; // Compare the row values with constants in the pattern bool matchA = outputA != nullptr ? true : patternA == valA; bool matchB = outputB != nullptr ? true : patternB == valB; bool matchC = outputC != nullptr ? true : patternC == valC; // If row doesn't match, set row values to -1 before writing out if (!(matchA && matchB && matchC)) { valA = -1; valB = -1; valC = -1; row_id = -1; } // Write out values to non-null outputs if (outputA != nullptr) outputA[tid] = valA; if (outputB != nullptr) outputB[tid] = valB; if (outputC != nullptr) outputC[tid] = valC; if (outputD != nullptr) outputD[tid] = row_id; } } } template <typename idx_t> db_result<idx_t> findMatches(db_pattern<idx_t>& pattern, db_table<idx_t>& table, idx_t* frontier, idx_t frontier_size, int indexPosition) { // Find out if the indexPosition is a variable or constant bool indexConstant = !pattern.getEntry(indexPosition).isVariable(); db_column_index<idx_t>& theIndex = table.getIndex(indexPosition); // Check to see whether we are going to be saving out the row ids from matches bool saveRowIds = false; if (pattern.getSize() == 4) saveRowIds = true; // Check if we have a frontier to use, if we don't make one up bool givenInputFrontier = frontier != nullptr; idx_t frontierSize; idx_t* frontier_ptr = nullptr; rmm::device_buffer frontierBuffer; if (givenInputFrontier) { frontier_ptr = frontier; frontierSize = frontier_size; } else { if (indexConstant) { // Use a single value equal to the constant in the pattern idx_t constantValue = pattern.getEntry(indexPosition).getConstant(); frontierBuffer.resize(sizeof(idx_t)); thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(frontierBuffer.data()), reinterpret_cast<idx_t*>(frontierBuffer.data()) + 1, constantValue); frontier_ptr = reinterpret_cast<idx_t*>(frontierBuffer.data()); frontierSize = 1; } else { // Making a sequence of values from zero to n where n is the highest ID present in the index. 
idx_t highestId = theIndex.getOffsetsSize() - 2; frontierBuffer.resize(sizeof(idx_t) * (highestId + 1)); thrust::sequence(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(frontierBuffer.data()), reinterpret_cast<idx_t*>(frontierBuffer.data()) + highestId + 1); frontier_ptr = reinterpret_cast<idx_t*>(frontierBuffer.data()); frontierSize = highestId + 1; } } // Collect all the pointers needed to run the main kernel idx_t* columnA = table.getColumn(0); idx_t* columnB = table.getColumn(1); idx_t* columnC = table.getColumn(2); idx_t* offsets = theIndex.getOffsets(); idx_t* indirection = theIndex.getIndirection(); // Load balance the input rmm::device_buffer exsum_degree(sizeof(idx_t) * (frontierSize + 1)); degree_iterator<idx_t> deg_it(offsets); deref_functor<degree_iterator<idx_t>, idx_t> deref(deg_it); thrust::fill(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(exsum_degree.data()), reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, 0); thrust::transform(rmm::exec_policy(nullptr)->on(nullptr), frontier_ptr, frontier_ptr + frontierSize, reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, deref); thrust::inclusive_scan(rmm::exec_policy(nullptr)->on(nullptr), reinterpret_cast<idx_t*>(exsum_degree.data()) + 1, reinterpret_cast<idx_t*>(exsum_degree.data()) + frontierSize + 1, reinterpret_cast<idx_t*>(exsum_degree.data()) + 1); idx_t output_size; CUDA_TRY(cudaMemcpy(&output_size, reinterpret_cast<idx_t*>(exsum_degree.data()) + frontierSize, sizeof(idx_t), cudaMemcpyDefault)); idx_t num_blocks = (output_size + FIND_MATCHES_BLOCK_SIZE - 1) / FIND_MATCHES_BLOCK_SIZE; rmm::device_buffer block_bucket_offsets(sizeof(idx_t) * (num_blocks + 1)); dim3 grid, block; block.x = 512; grid.x = min((idx_t)MAXBLOCKS, (num_blocks / 512) + 1); compute_bucket_offsets_kernel<<<grid, block, 0, nullptr>>>( reinterpret_cast<idx_t*>(exsum_degree.data()), reinterpret_cast<idx_t*>(block_bucket_offsets.data()), frontierSize, output_size); // Allocate space for the result idx_t* outputA = nullptr; idx_t* outputB = nullptr; idx_t* outputC = nullptr; idx_t* outputD = nullptr; rmm::device_buffer outputABuffer; rmm::device_buffer outputBBuffer; rmm::device_buffer outputCBuffer; rmm::device_buffer outputDBuffer; if (pattern.getEntry(0).isVariable()) { outputABuffer.resize(sizeof(idx_t) * output_size); outputA = reinterpret_cast<idx_t*>(outputABuffer.data()); } if (pattern.getEntry(1).isVariable()) { outputBBuffer.resize(sizeof(idx_t) * output_size); outputB = reinterpret_cast<idx_t*>(outputBBuffer.data()); } if (pattern.getEntry(2).isVariable()) { outputCBuffer.resize(sizeof(idx_t) * output_size); outputC = reinterpret_cast<idx_t*>(outputCBuffer.data()); } if (saveRowIds) { outputDBuffer.resize(sizeof(idx_t) * output_size); outputD = reinterpret_cast<idx_t*>(outputDBuffer.data()); } // Get the constant pattern entries from the pattern to pass into the main kernel idx_t patternA = -1; idx_t patternB = -1; idx_t patternC = -1; if (!pattern.getEntry(0).isVariable()) { patternA = pattern.getEntry(0).getConstant(); } if (!pattern.getEntry(1).isVariable()) { patternB = pattern.getEntry(1).getConstant(); } if (!pattern.getEntry(2).isVariable()) { patternC = pattern.getEntry(2).getConstant(); } // Call the main kernel block.x = FIND_MATCHES_BLOCK_SIZE; grid.x = min((idx_t)MAXBLOCKS, (output_size + (idx_t)FIND_MATCHES_BLOCK_SIZE - 1) / (idx_t)FIND_MATCHES_BLOCK_SIZE); findMatchesKernel<<<grid, block, 0, nullptr>>>( frontierSize, output_size, num_blocks, offsets, indirection, 
reinterpret_cast<idx_t*>(block_bucket_offsets.data()), reinterpret_cast<idx_t*>(exsum_degree.data()), frontier_ptr, columnA, columnB, columnC, outputA, outputB, outputC, outputD, patternA, patternB, patternC); // Get the non-null output columns std::vector<idx_t*> columns; std::vector<std::string> names; if (outputA != nullptr) { columns.push_back(outputA); names.push_back(pattern.getEntry(0).getVariable()); } if (outputB != nullptr) { columns.push_back(outputB); names.push_back(pattern.getEntry(1).getVariable()); } if (outputC != nullptr) { columns.push_back(outputC); names.push_back(pattern.getEntry(2).getVariable()); } if (outputD != nullptr) { columns.push_back(outputD); names.push_back(pattern.getEntry(3).getVariable()); } // Remove non-matches from result rmm::device_buffer flags(sizeof(int8_t) * output_size); idx_t* col_ptr = columns[0]; thrust::transform(rmm::exec_policy(nullptr)->on(nullptr), col_ptr, col_ptr + output_size, reinterpret_cast<int8_t*>(flags.data()), notNegativeOne<idx_t, int8_t>()); size_t tempSpaceSize = 0; rmm::device_buffer compactSize_d(sizeof(idx_t)); cub::DeviceSelect::Flagged(nullptr, tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); rmm::device_buffer tempSpace(tempSpaceSize); cub::DeviceSelect::Flagged(tempSpace.data(), tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); idx_t compactSize_h; cudaMemcpy(&compactSize_h, compactSize_d.data(), sizeof(idx_t), cudaMemcpyDefault); for (size_t i = 1; i < columns.size(); i++) { col_ptr = columns[i]; cub::DeviceSelect::Flagged(tempSpace.data(), tempSpaceSize, col_ptr, reinterpret_cast<int8_t*>(flags.data()), col_ptr, reinterpret_cast<idx_t*>(compactSize_d.data()), output_size); } // Put together the result to return db_result<idx_t> result; for (size_t i = 0; i < names.size(); i++) { result.addColumn(names[i]); } result.allocateColumns(compactSize_h); for (size_t i = 0; i < columns.size(); i++) { idx_t* outputPtr = result.getData(names[i]); idx_t* inputPtr = columns[i]; CUDA_TRY(cudaMemcpy(outputPtr, inputPtr, sizeof(idx_t) * compactSize_h, cudaMemcpyDefault)); } // Return the result return result; } template db_result<int32_t> findMatches(db_pattern<int32_t>& pattern, db_table<int32_t>& table, int32_t* frontier, int32_t frontier_size, int indexPosition); template db_result<int64_t> findMatches(db_pattern<int64_t>& pattern, db_table<int64_t>& table, int64_t* frontier, int64_t frontier_size, int indexPosition); } // namespace db } // namespace cugraph
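findMatches load-balances the frontier by scanning per-vertex degrees and then mapping each flat work item back to its source row with binsearch_maxle. The small host-only program below illustrates that index arithmetic in isolation; maxle copies the device binsearch_maxle logic, and the degree array is made-up example data, not taken from any real index.

#include <cstdio>

// Same contract as the device binsearch_maxle: largest index in [low, high]
// whose value is <= val, assuming vec is non-decreasing and vec[low] <= val.
int maxle(const int* vec, int val, int low, int high)
{
    while (true) {
        if (low == high) return low;
        if (low + 1 == high) return (vec[high] <= val) ? high : low;
        int mid = low + (high - low) / 2;
        if (vec[mid] > val) high = mid - 1; else low = mid;
    }
}

int main()
{
    const int frontier = 4;
    int degree[frontier] = {3, 0, 2, 5};       // per-vertex nonzero / edge counts
    int exsum[frontier + 1] = {0};             // exsum[i] = items produced before vertex i
    for (int i = 0; i < frontier; ++i) exsum[i + 1] = exsum[i] + degree[i];

    // Every flat work item tid maps to (source vertex, rank within that vertex),
    // exactly as findMatchesKernel does with localExSum.
    for (int tid = 0; tid < exsum[frontier]; ++tid) {
        int src  = maxle(exsum, tid, 0, frontier - 1);
        int rank = tid - exsum[src];
        printf("work item %d -> vertex %d, edge %d\n", tid, src, rank);
    }
    return 0;
}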
3db4886d89bfd9ce348e1005b68b5f76c383e8cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "RayTracingGM.h" #include <iostream> #include <assert.h> #include "Device.h" #include "AleaTools.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rayTracingGM(uchar4* ptrDevPixels, int w, int h, int spheres, int t, Sphere* ptrSpheres); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ RayTracingGM::RayTracingGM(const Grid& grid, uint w, uint h, float dt, int nbSpheres): Animable_I<uchar4>(grid, w, h, "RayTracing_GM_Cuda"), RayTracing(w,h) { this->dt = dt; this->nbSpheres = nbSpheres; this->ptrSpheres = new Sphere[this->nbSpheres]; this->t = 0.0; RayTracing::populateSpheres(this->nbSpheres, this->ptrSpheres); } RayTracingGM::~RayTracingGM() { delete[] ptrSpheres; } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API */ void RayTracingGM::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Sphere* ptrDevSphere=NULL; size_t currentSize = this->nbSpheres * sizeof(Sphere); HANDLE_ERROR(hipMalloc(&ptrDevSphere, currentSize)); HANDLE_ERROR(hipMemcpy(ptrDevSphere, this->ptrSpheres, currentSize, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( rayTracingGM), dim3(dg),dim3(db), 0, 0, ptrDevPixels, w, h, this->nbSpheres, this->t, ptrDevSphere); HANDLE_ERROR(hipFree(ptrDevSphere)); } /** * Override * Call periodicly by the API */ void RayTracingGM::animationStep() { t += dt; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
3db4886d89bfd9ce348e1005b68b5f76c383e8cb.cu
#include "RayTracingGM.h" #include <iostream> #include <assert.h> #include "Device.h" #include "AleaTools.h" using namespace gpu; /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ extern __global__ void rayTracingGM(uchar4* ptrDevPixels, int w, int h, int spheres, int t, Sphere* ptrSpheres); /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ /*-------------------------*\ |* Constructeur *| \*-------------------------*/ RayTracingGM::RayTracingGM(const Grid& grid, uint w, uint h, float dt, int nbSpheres): Animable_I<uchar4>(grid, w, h, "RayTracing_GM_Cuda"), RayTracing(w,h) { this->dt = dt; this->nbSpheres = nbSpheres; this->ptrSpheres = new Sphere[this->nbSpheres]; this->t = 0.0; RayTracing::populateSpheres(this->nbSpheres, this->ptrSpheres); } RayTracingGM::~RayTracingGM() { delete[] ptrSpheres; } /*-------------------------*\ |* Methode *| \*-------------------------*/ /** * Override * Call periodicly by the API */ void RayTracingGM::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath) { Sphere* ptrDevSphere=NULL; size_t currentSize = this->nbSpheres * sizeof(Sphere); HANDLE_ERROR(cudaMalloc(&ptrDevSphere, currentSize)); HANDLE_ERROR(cudaMemcpy(ptrDevSphere, this->ptrSpheres, currentSize, cudaMemcpyHostToDevice)); rayTracingGM<<<dg,db>>>(ptrDevPixels, w, h, this->nbSpheres, this->t, ptrDevSphere); HANDLE_ERROR(cudaFree(ptrDevSphere)); } /** * Override * Call periodicly by the API */ void RayTracingGM::animationStep() { t += dt; } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
021de9720f8912c7e4cab86c5fc6c79fc01f1d10.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ftl/render/render_params.hpp> #include "splatter_cuda.hpp" #include <ftl/rgbd/camera.hpp> #include <ftl/cuda_common.hpp> #include <ftl/cuda/makers.hpp> #define T_PER_BLOCK 8 using ftl::cuda::TextureObject; using ftl::render::Parameters; using ftl::rgbd::Projection; /* * DIBR point cloud with a depth check */ template <Projection PROJECT> __global__ void dibr_merge_kernel(TextureObject<float> depth, TextureObject<int> depth_out, float4x4 transform, ftl::rgbd::Camera cam, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d0 = depth.tex2D(x, y); if (d0 <= cam.minDepth || d0 >= cam.maxDepth) return; const float3 camPos = transform * cam.screenToCam(x,y,d0); //const float d = camPos.z; //const uint2 screenPos = params.camera.camToScreen<uint2>(camPos); const float3 screenPos = params.camera.project<PROJECT>(camPos); const unsigned int cx = (unsigned int)(screenPos.x+0.5f); const unsigned int cy = (unsigned int)(screenPos.y+0.5f); const float d = screenPos.z; if (d > params.camera.minDepth && d < params.camera.maxDepth && cx < depth_out.width() && cy < depth_out.height()) { // Transform estimated point to virtual cam space and output z atomicMin(&depth_out(cx,cy), d * 100000.0f); } } /* * DIBR Point cloud with a constant depth assumption */ __global__ void dibr_merge_kernel( TextureObject<int> depth_out, float4x4 transform, ftl::rgbd::Camera cam, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d0 = 1.0f; const float3 camPos = transform * cam.screenToCam(x,y,d0); const float d = camPos.z; const uint2 screenPos = params.camera.camToScreen<uint2>(camPos); const unsigned int cx = screenPos.x; const unsigned int cy = screenPos.y; if (d > params.camera.minDepth && d < params.camera.maxDepth && cx < depth_out.width() && cy < depth_out.height()) { // Transform estimated point to virtual cam space and output z atomicMin(&depth_out(cx,cy), d * 100000.0f); } } void ftl::cuda::dibr_merge(TextureObject<float> &depth, TextureObject<int> &depth_out, const float4x4 &transform, const ftl::rgbd::Camera &cam, Parameters params, hipStream_t stream) { const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); if (params.projection == Projection::PERSPECTIVE) { hipLaunchKernelGGL(( dibr_merge_kernel<Projection::PERSPECTIVE>), dim3(gridSize), dim3(blockSize), 0, stream, depth, depth_out, transform, cam, params); } else { hipLaunchKernelGGL(( dibr_merge_kernel<Projection::ORTHOGRAPHIC>), dim3(gridSize), dim3(blockSize), 0, stream, depth, depth_out, transform, cam, params); } cudaSafeCall( hipGetLastError() ); } void ftl::cuda::dibr_merge(TextureObject<int> &depth_out, const float4x4 &transform, const ftl::rgbd::Camera &cam, Parameters params, hipStream_t stream) { const dim3 gridSize((cam.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (cam.height + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( dibr_merge_kernel), dim3(gridSize), dim3(blockSize), 0, stream, depth_out, transform, cam, params); cudaSafeCall( hipGetLastError() ); } // ==== Normalize ============================================================== template <typename A, typename B, bool FLIPY> __global__ void dibr_normalise_kernel( TextureObject<A> 
in, TextureObject<B> out, TextureObject<int> contribs) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < in.width() && y < in.height()) { const float contrib = float(contribs.tex2D((int)x,(int)y) & 0xFFFFFF) / float(0xFFFF); const A a = in.tex2D((int)x,(int)y); //const float4 normal = normals.tex2D((int)x,(int)y); //out(x,y) = (contrib == 0.0f) ? make<B>(a) : make<B>(a / contrib); if (contrib > 0.0f) { if (FLIPY) out(x,out.height()-y-1) = make<B>(a / contrib); else out(x,y) = make<B>(a / contrib); //normals(x,y) = normal / contrib; } } } template <typename A, typename B> void ftl::cuda::dibr_normalise(TextureObject<A> &in, TextureObject<B> &out, TextureObject<int> &contribs, bool flip, hipStream_t stream) { const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); if (flip) { hipLaunchKernelGGL(( dibr_normalise_kernel<A,B,true>), dim3(gridSize), dim3(blockSize), 0, stream, in, out, contribs); } else { hipLaunchKernelGGL(( dibr_normalise_kernel<A,B,false>), dim3(gridSize), dim3(blockSize), 0, stream, in, out, contribs); } cudaSafeCall( hipGetLastError() ); } template void ftl::cuda::dibr_normalise<float4,uchar4>(TextureObject<float4> &in, TextureObject<uchar4> &out, TextureObject<int> &contribs, bool, hipStream_t stream); template void ftl::cuda::dibr_normalise<float,float>(TextureObject<float> &in, TextureObject<float> &out, TextureObject<int> &contribs, bool, hipStream_t stream); template void ftl::cuda::dibr_normalise<float4,float4>(TextureObject<float4> &in, TextureObject<float4> &out, TextureObject<int> &contribs, bool, hipStream_t stream); // Float version template <typename A, typename B> __global__ void dibr_normalise_kernel( TextureObject<A> in, TextureObject<B> out, TextureObject<float> weights) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < in.width() && y < in.height()) { const float contrib = weights.tex2D((int)x,(int)y); const A a = in.tex2D((int)x,(int)y); if (contrib > 0.0f) { out(x,y) = make<B>(a / contrib); } } } template <typename A, typename B> void ftl::cuda::dibr_normalise(TextureObject<A> &in, TextureObject<B> &out, TextureObject<float> &weights, hipStream_t stream) { const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); hipLaunchKernelGGL(( dibr_normalise_kernel), dim3(gridSize), dim3(blockSize), 0, stream, in, out, weights); cudaSafeCall( hipGetLastError() ); } template void ftl::cuda::dibr_normalise<float4,uchar4>(TextureObject<float4> &in, TextureObject<uchar4> &out, TextureObject<float> &weights, hipStream_t stream); template void ftl::cuda::dibr_normalise<float,float>(TextureObject<float> &in, TextureObject<float> &out, TextureObject<float> &weights, hipStream_t stream); template void ftl::cuda::dibr_normalise<float4,float4>(TextureObject<float4> &in, TextureObject<float4> &out, TextureObject<float> &weights, hipStream_t stream);
021de9720f8912c7e4cab86c5fc6c79fc01f1d10.cu
#include <ftl/render/render_params.hpp> #include "splatter_cuda.hpp" #include <ftl/rgbd/camera.hpp> #include <ftl/cuda_common.hpp> #include <ftl/cuda/makers.hpp> #define T_PER_BLOCK 8 using ftl::cuda::TextureObject; using ftl::render::Parameters; using ftl::rgbd::Projection; /* * DIBR point cloud with a depth check */ template <Projection PROJECT> __global__ void dibr_merge_kernel(TextureObject<float> depth, TextureObject<int> depth_out, float4x4 transform, ftl::rgbd::Camera cam, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d0 = depth.tex2D(x, y); if (d0 <= cam.minDepth || d0 >= cam.maxDepth) return; const float3 camPos = transform * cam.screenToCam(x,y,d0); //const float d = camPos.z; //const uint2 screenPos = params.camera.camToScreen<uint2>(camPos); const float3 screenPos = params.camera.project<PROJECT>(camPos); const unsigned int cx = (unsigned int)(screenPos.x+0.5f); const unsigned int cy = (unsigned int)(screenPos.y+0.5f); const float d = screenPos.z; if (d > params.camera.minDepth && d < params.camera.maxDepth && cx < depth_out.width() && cy < depth_out.height()) { // Transform estimated point to virtual cam space and output z atomicMin(&depth_out(cx,cy), d * 100000.0f); } } /* * DIBR Point cloud with a constant depth assumption */ __global__ void dibr_merge_kernel( TextureObject<int> depth_out, float4x4 transform, ftl::rgbd::Camera cam, Parameters params) { const int x = blockIdx.x*blockDim.x + threadIdx.x; const int y = blockIdx.y*blockDim.y + threadIdx.y; const float d0 = 1.0f; const float3 camPos = transform * cam.screenToCam(x,y,d0); const float d = camPos.z; const uint2 screenPos = params.camera.camToScreen<uint2>(camPos); const unsigned int cx = screenPos.x; const unsigned int cy = screenPos.y; if (d > params.camera.minDepth && d < params.camera.maxDepth && cx < depth_out.width() && cy < depth_out.height()) { // Transform estimated point to virtual cam space and output z atomicMin(&depth_out(cx,cy), d * 100000.0f); } } void ftl::cuda::dibr_merge(TextureObject<float> &depth, TextureObject<int> &depth_out, const float4x4 &transform, const ftl::rgbd::Camera &cam, Parameters params, cudaStream_t stream) { const dim3 gridSize((depth.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (depth.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); if (params.projection == Projection::PERSPECTIVE) { dibr_merge_kernel<Projection::PERSPECTIVE><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, transform, cam, params); } else { dibr_merge_kernel<Projection::ORTHOGRAPHIC><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, transform, cam, params); } cudaSafeCall( cudaGetLastError() ); } void ftl::cuda::dibr_merge(TextureObject<int> &depth_out, const float4x4 &transform, const ftl::rgbd::Camera &cam, Parameters params, cudaStream_t stream) { const dim3 gridSize((cam.width + T_PER_BLOCK - 1)/T_PER_BLOCK, (cam.height + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); dibr_merge_kernel<<<gridSize, blockSize, 0, stream>>>(depth_out, transform, cam, params); cudaSafeCall( cudaGetLastError() ); } // ==== Normalize ============================================================== template <typename A, typename B, bool FLIPY> __global__ void dibr_normalise_kernel( TextureObject<A> in, TextureObject<B> out, TextureObject<int> contribs) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; 
if (x < in.width() && y < in.height()) { const float contrib = float(contribs.tex2D((int)x,(int)y) & 0xFFFFFF) / float(0xFFFF); const A a = in.tex2D((int)x,(int)y); //const float4 normal = normals.tex2D((int)x,(int)y); //out(x,y) = (contrib == 0.0f) ? make<B>(a) : make<B>(a / contrib); if (contrib > 0.0f) { if (FLIPY) out(x,out.height()-y-1) = make<B>(a / contrib); else out(x,y) = make<B>(a / contrib); //normals(x,y) = normal / contrib; } } } template <typename A, typename B> void ftl::cuda::dibr_normalise(TextureObject<A> &in, TextureObject<B> &out, TextureObject<int> &contribs, bool flip, cudaStream_t stream) { const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); if (flip) { dibr_normalise_kernel<A,B,true><<<gridSize, blockSize, 0, stream>>>(in, out, contribs); } else { dibr_normalise_kernel<A,B,false><<<gridSize, blockSize, 0, stream>>>(in, out, contribs); } cudaSafeCall( cudaGetLastError() ); } template void ftl::cuda::dibr_normalise<float4,uchar4>(TextureObject<float4> &in, TextureObject<uchar4> &out, TextureObject<int> &contribs, bool, cudaStream_t stream); template void ftl::cuda::dibr_normalise<float,float>(TextureObject<float> &in, TextureObject<float> &out, TextureObject<int> &contribs, bool, cudaStream_t stream); template void ftl::cuda::dibr_normalise<float4,float4>(TextureObject<float4> &in, TextureObject<float4> &out, TextureObject<int> &contribs, bool, cudaStream_t stream); // Float version template <typename A, typename B> __global__ void dibr_normalise_kernel( TextureObject<A> in, TextureObject<B> out, TextureObject<float> weights) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; if (x < in.width() && y < in.height()) { const float contrib = weights.tex2D((int)x,(int)y); const A a = in.tex2D((int)x,(int)y); if (contrib > 0.0f) { out(x,y) = make<B>(a / contrib); } } } template <typename A, typename B> void ftl::cuda::dibr_normalise(TextureObject<A> &in, TextureObject<B> &out, TextureObject<float> &weights, cudaStream_t stream) { const dim3 gridSize((in.width() + T_PER_BLOCK - 1)/T_PER_BLOCK, (in.height() + T_PER_BLOCK - 1)/T_PER_BLOCK); const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK); dibr_normalise_kernel<<<gridSize, blockSize, 0, stream>>>(in, out, weights); cudaSafeCall( cudaGetLastError() ); } template void ftl::cuda::dibr_normalise<float4,uchar4>(TextureObject<float4> &in, TextureObject<uchar4> &out, TextureObject<float> &weights, cudaStream_t stream); template void ftl::cuda::dibr_normalise<float,float>(TextureObject<float> &in, TextureObject<float> &out, TextureObject<float> &weights, cudaStream_t stream); template void ftl::cuda::dibr_normalise<float4,float4>(TextureObject<float4> &in, TextureObject<float4> &out, TextureObject<float> &weights, cudaStream_t stream);
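dibr_merge writes d * 100000 into an int surface because CUDA's atomicMin has no float overload; for non-negative depths the fixed-point encoding preserves ordering, so the integer minimum per pixel is the nearest surface. A minimal sketch of that trick in isolation follows; keep_nearest and the sample values are made up and are not part of ftl.

#include <cuda_runtime.h>
#include <cstdio>

// All samples compete for a single "pixel": the smallest encoded depth wins.
__global__ void keep_nearest(const float* depth, int* depth_fixed, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int encoded = (int)(depth[i] * 100000.0f);     // metres -> 10-micrometre units
    atomicMin(&depth_fixed[0], encoded);
}

int main()
{
    float h_depth[4] = {1.25f, 0.80f, 3.10f, 0.95f};
    float* d_depth; int* d_fixed;
    cudaMalloc(&d_depth, sizeof(h_depth));
    cudaMalloc(&d_fixed, sizeof(int));
    int init = 0x7FFFFFFF;                         // "far plane" sentinel
    cudaMemcpy(d_depth, h_depth, sizeof(h_depth), cudaMemcpyHostToDevice);
    cudaMemcpy(d_fixed, &init, sizeof(int), cudaMemcpyHostToDevice);

    keep_nearest<<<1, 4>>>(d_depth, d_fixed, 4);

    int result;
    cudaMemcpy(&result, d_fixed, sizeof(int), cudaMemcpyDeviceToHost);
    printf("nearest depth = %f m\n", result / 100000.0f);   // expect 0.80
    cudaFree(d_depth); cudaFree(d_fixed);
    return 0;
}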
0f1abb72e48d261930adc682662c9dc26f9a0b4e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/ztranspose_conj_inplace.cu normal z -> c, Tue Feb 9 16:05:33 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define NB 16 // nearly same code in ctranspose_inplace.cu //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See ctranspose_conj_inplace_even for description of threads. __global__ void ctranspose_conj_inplace_odd( int n, magmaFloatComplex *matrix, int lda ) { __shared__ magmaFloatComplex sA[ NB ][ NB+1 ]; __shared__ magmaFloatComplex sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } if ( jj+i < n && ii+j < n ) { sB[j][i] = MAGMA_C_CONJ( *B ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void ctranspose_conj_inplace_even( int n, magmaFloatComplex *matrix, int lda ) { __shared__ magmaFloatComplex sA[ NB ][ NB+1 ]; __shared__ magmaFloatComplex sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } if ( jj+i < n && ii+j < n ) { sB[j][i] = MAGMA_C_CONJ( *B ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- ctranspose_conj_inplace_q conjugate-transposes a square N-by-N matrix in-place. Same as ctranspose_conj_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_conj_inplace_q( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = magma_ceildiv( n, NB ); // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if ( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); hipLaunchKernelGGL(( ctranspose_conj_inplace_odd), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); hipLaunchKernelGGL(( ctranspose_conj_inplace_even), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA, ldda ); } }
0f1abb72e48d261930adc682662c9dc26f9a0b4e.cu
/* -- MAGMA (version 2.0.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date February 2016 @generated from magmablas/ztranspose_conj_inplace.cu normal z -> c, Tue Feb 9 16:05:33 2016 @author Stan Tomov @author Mark Gates */ #include "magma_internal.h" #define NB 16 // nearly same code in ctranspose_inplace.cu //////////////////////////////////////////////////////////////////////////////// // grid is (n/nb) x ((n/nb)/2 + 1), where n/nb is odd. // lower indicates blocks in lower triangle of grid, including diagonal. // lower blocks cover left side of matrix, including diagonal. // upper blocks swap block indices (x,y) and shift by grid width (or width-1) // to cover right side of matrix. // [ A00 A01 A02 ] [ A00 . . | . . ] // [ A10 A11 A12 ] [ A10 A11 . | . . ] // grid [ A20 A21 A22 ] covers matrix as [ A20 A21 A22 | . . ] // [ A30 A31 A32 ] [ A30 A31 A32 | A01 . ] // [ A40 A41 A42 ] [ A40 A41 A42 | A02 A12 ] // // See ctranspose_conj_inplace_even for description of threads. __global__ void ctranspose_conj_inplace_odd( int n, magmaFloatComplex *matrix, int lda ) { __shared__ magmaFloatComplex sA[ NB ][ NB+1 ]; __shared__ magmaFloatComplex sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x >= blockIdx.y); int ii = (lower ? blockIdx.x : (blockIdx.y + gridDim.y - 1)); int jj = (lower ? blockIdx.y : (blockIdx.x + gridDim.y )); ii *= NB; jj *= NB; magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } if ( jj+i < n && ii+j < n ) { sB[j][i] = MAGMA_C_CONJ( *B ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } //////////////////////////////////////////////////////////////////////////////// // grid is ((n/nb) + 1) x (n/nb)/2, where n/nb is even. // lower indicates blocks in strictly lower triangle of grid, excluding diagonal. // lower blocks shift up by one to cover left side of matrix including diagonal. // upper blocks swap block indices (x,y) and shift by grid width // to cover right side of matrix. // [ A00 A01 ] [ A10 . | . . ] // [ A10 A11 ] [ A20 A21 | . . ] // grid [ A20 A21 ] covers matrix as [ A30 A31 | A00 . ] // [ A30 A31 ] [ A40 A41 | A01 A11 ] // [ A40 A41 ] // // Each block is NB x NB threads. // For non-diagonal block A, block B is symmetric block. // Thread (i,j) loads A(i,j) into sA(j,i) and B(i,j) into sB(j,i), i.e., transposed, // syncs, then saves sA(i,j) to B(i,j) and sB(i,j) to A(i,j). // Threads outside the matrix do not touch memory. __global__ void ctranspose_conj_inplace_even( int n, magmaFloatComplex *matrix, int lda ) { __shared__ magmaFloatComplex sA[ NB ][ NB+1 ]; __shared__ magmaFloatComplex sB[ NB ][ NB+1 ]; int i = threadIdx.x; int j = threadIdx.y; bool lower = (blockIdx.x > blockIdx.y); int ii = (lower ? (blockIdx.x - 1) : (blockIdx.y + gridDim.y)); int jj = (lower ? 
(blockIdx.y ) : (blockIdx.x + gridDim.y)); ii *= NB; jj *= NB; magmaFloatComplex *A = matrix + ii+i + (jj+j)*lda; if ( ii == jj ) { if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sA[i][j]; } } else { magmaFloatComplex *B = matrix + jj+i + (ii+j)*lda; if ( ii+i < n && jj+j < n ) { sA[j][i] = MAGMA_C_CONJ( *A ); } if ( jj+i < n && ii+j < n ) { sB[j][i] = MAGMA_C_CONJ( *B ); } __syncthreads(); if ( ii+i < n && jj+j < n ) { *A = sB[i][j]; } if ( jj+i < n && ii+j < n ) { *B = sA[i][j]; } } } /** Purpose ------- ctranspose_conj_inplace_q conjugate-transposes a square N-by-N matrix in-place. Same as ctranspose_conj_inplace, but adds queue argument. Arguments --------- @param[in] n INTEGER The number of rows & columns of the matrix dA. N >= 0. @param[in] dA COMPLEX array, dimension (LDDA,N) The N-by-N matrix dA. On exit, dA(j,i) = dA_original(i,j), for 0 <= i,j < N. @param[in] ldda INTEGER The leading dimension of the array dA. LDDA >= N. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_ctranspose_conj_inplace_q( magma_int_t n, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue ) { magma_int_t info = 0; if ( n < 0 ) info = -1; else if ( ldda < n ) info = -3; if ( info != 0 ) { magma_xerbla( __func__, -(info) ); return; //info; } dim3 threads( NB, NB ); int nblock = magma_ceildiv( n, NB ); // need 1/2 * (nblock+1) * nblock to cover lower triangle and diagonal of matrix. // block assignment differs depending on whether nblock is odd or even. if ( nblock % 2 == 1 ) { dim3 grid( nblock, (nblock+1)/2 ); ctranspose_conj_inplace_odd<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda ); } else { dim3 grid( nblock+1, nblock/2 ); ctranspose_conj_inplace_even<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA, ldda ); } }
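The block-index remapping documented in the comments above is easy to sanity-check on the host. The sketch below (not part of MAGMA) enumerates the odd and even grids for small nblock values and verifies that every lower-triangular tile pair (ii, jj) is visited exactly once.

#include <cstdio>
#include <vector>

int main()
{
    for (int nblock = 1; nblock <= 8; ++nblock) {
        int gx, gy;                                   // grid dims chosen as in the launcher
        if (nblock % 2 == 1) { gx = nblock;     gy = (nblock + 1) / 2; }
        else                 { gx = nblock + 1; gy = nblock / 2;       }

        std::vector<int> hits(nblock * nblock, 0);
        for (int bx = 0; bx < gx; ++bx)
            for (int by = 0; by < gy; ++by) {
                int ii, jj;
                if (nblock % 2 == 1) {                // ctranspose_conj_inplace_odd
                    bool lower = (bx >= by);
                    ii = lower ? bx : (by + gy - 1);
                    jj = lower ? by : (bx + gy);
                } else {                              // ctranspose_conj_inplace_even
                    bool lower = (bx > by);
                    ii = lower ? (bx - 1) : (by + gy);
                    jj = lower ? by       : (bx + gy);
                }
                ++hits[ii * nblock + jj];
            }

        bool ok = true;
        for (int i = 0; i < nblock; ++i)
            for (int j = 0; j <= i; ++j)
                if (hits[i * nblock + j] != 1) ok = false;
        printf("nblock=%d: %s\n", nblock, ok ? "lower triangle covered once" : "MISMATCH");
    }
    return 0;
}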
f4977f7f957d65a34b6f98a60eab59144e457649.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "pack_right.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int x = 1; const int y = 1; const int halo_depth = 1; double *field = NULL; hipMalloc(&field, XSIZE*YSIZE); double *buffer = NULL; hipMalloc(&buffer, XSIZE*YSIZE); const int depth = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( pack_right), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,halo_depth,field,buffer,depth); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( pack_right), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,halo_depth,field,buffer,depth); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( pack_right), dim3(gridBlock),dim3(threadBlock), 0, 0, x,y,halo_depth,field,buffer,depth); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
f4977f7f957d65a34b6f98a60eab59144e457649.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "pack_right.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const int x = 1; const int y = 1; const int halo_depth = 1; double *field = NULL; cudaMalloc(&field, XSIZE*YSIZE); double *buffer = NULL; cudaMalloc(&buffer, XSIZE*YSIZE); const int depth = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); pack_right<<<gridBlock,threadBlock>>>(x,y,halo_depth,field,buffer,depth); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { pack_right<<<gridBlock,threadBlock>>>(x,y,halo_depth,field,buffer,depth); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { pack_right<<<gridBlock,threadBlock>>>(x,y,halo_depth,field,buffer,depth); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
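The generated harness above times 1000 launches with std::chrono but never synchronizes before taking the end timestamp, so it largely measures enqueue overhead rather than kernel execution; it also sizes the buffers as XSIZE*YSIZE bytes rather than XSIZE*YSIZE*sizeof(double). A hedged sketch of device-side timing with CUDA events follows; dummy_kernel, N and the launch shape are placeholders, not the pack_right kernel.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel(double* buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = buf[i] * 2.0 + 1.0;
}

int main()
{
    const int N = 1 << 20;
    double* d_buf;
    cudaMalloc(&d_buf, N * sizeof(double));            // size in bytes, not elements

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dummy_kernel<<<(N + 255) / 256, 256>>>(d_buf, N);  // warm-up launch

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        dummy_kernel<<<(N + 255) / 256, 256>>>(d_buf, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                        // wait for the timed work to finish

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("avg per launch: %f ms\n", ms / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_buf);
    return 0;
}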
4aa8f760e560e9478cf5b2f89ac7bff5ce9079b3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <memory>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <vector>

// kernel
template<typename T>
__global__ void spMulAdd(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz)
{
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    T y_val = 0.0;

    // In Ax = y, each thread computes one component of the vector y
    if (tid < n) {
        // each thread only needs to read the nonzeros of its own CSR row of A
        #pragma unroll
        for (auto j = row[tid]; j < row[tid + 1]; ++j) {
            y_val += val[j] * dx[col[j]];
        }
        dy[tid] = y_val;
        // increment the thread index until it reaches n (until every element of y is computed)
        tid += blockIdx.x * blockDim.x;
    }
}

template<typename T>
__device__ T warp_reduction(T val)
{
#define warpSize 32
    for (auto offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xFFFFFFFF, val, offset, warpSize);
    }
    return val;
}

template<typename T>
__global__ void spMulAdd_vector(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz)
{
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    auto rowid = tid / warpSize;
    auto lane = tid % warpSize;
    T y_val = 0;

    if (rowid < n) {
        for (auto i = row[rowid] + lane; i < row[rowid + 1]; i += warpSize) {
            y_val += val[i] * dx[col[i]];
        }
        y_val = warp_reduction<T>(y_val);
    }
    if (lane == 0 && rowid < n) {
        dy[rowid] = y_val;
    }
}

int main(int args, char *argv[])
{
    // n is given as a run-time argument
    int n;
    n = atoi(argv[1]);

    // build the sparse matrix
    int *row, *col;
    double *val, *vec_x, *vec_y;

    std::unique_ptr<double[]> host_a(new double[n * n]);
    for (auto i = 0; i < n * n; i++) {
        if (static_cast<double>(std::rand()) / RAND_MAX < 0.5) {
            //host_a[i] = static_cast<double>(std::rand()) / RAND_MAX;
            host_a[i] = 1;
        } else {
            host_a[i] = 0;
        }
    }

    std::unique_ptr<int[]> host_row(new int[n + 1]);
    std::vector<int> host_col;
    std::vector<double> host_val;

    auto nnz = 0;
    host_row[0] = nnz;
    for (auto i = 0; i < n; i++) {
        for (auto j = 0; j < n; j++) {
            if (host_a[i * n + j] != 0) {
                host_val.push_back(host_a[i * n + j]);
                host_col.push_back(j);
                nnz++;
            }
        }
        host_row[i + 1] = nnz;
    }

    // build the vectors x and y
    std::unique_ptr<double[]> host_x(new double[n]);
    std::unique_ptr<double[]> host_y(new double[n]);
    for (auto i = 0; i < n; i++) {
        //host_x[i] = static_cast<double>(rand()) / RAND_MAX;
        host_x[i] = 1;
        host_y[i] = 0;
    }

    // hand the data to the gpu
    hipMalloc((void**)&row, (n + 1) * sizeof(int));
    hipMalloc((void**)&col, nnz * sizeof(int));
    hipMalloc((void**)&val, nnz * sizeof(double));
    hipMalloc((void**)&vec_x, n * sizeof(double));
    hipMalloc((void**)&vec_y, n * sizeof(double));

    hipMemcpy(row, host_row.get(), (n + 1) * sizeof(int), hipMemcpyHostToDevice);
    auto* p_host_col = host_col.data();
    hipMemcpy(col, p_host_col, nnz * sizeof(int), hipMemcpyHostToDevice);
    auto* p_host_val = host_val.data();
    hipMemcpy(val, p_host_val, nnz * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(vec_x, host_x.get(), n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(vec_y, host_y.get(), n * sizeof(double), hipMemcpyHostToDevice);

    // how should the thread-block size be chosen?
    auto blocksize = 32;
    dim3 block(blocksize, 1, 1);
    dim3 grid(warpSize * ::ceil(n / static_cast<double>(block.x)), 1, 1);

    // timing section; is it ok not to include the data transfers?
    std::chrono::system_clock::time_point start, end;
    start = std::chrono::system_clock::now();

    // run the computation
    hipLaunchKernelGGL((spMulAdd_vector<double>), dim3(grid), dim3(block), 0, 0, row, col, val, vec_x, vec_y, n, nnz);
    hipDeviceSynchronize(); // wait for the kernel to finish before stopping the timer

    end = std::chrono::system_clock::now();

    // check the result by recomputing it on the cpu
    std::unique_ptr<double[]> result(new double[n]);
    hipMemcpy(result.get(), vec_y, n * sizeof(double), hipMemcpyDeviceToHost);

    std::unique_ptr<double[]> host_result(new double[n]);
    for (auto i = 0; i < n; i++) {
        host_result[i] = 0;
    }
    for (auto i = 0; i < n; i++) {
        for (auto j = 0; j < n; j++) {
            host_result[i] += host_a[i * n + j] * host_x[j];
        }
    }

    auto checker = 0;
    for (auto i = 0; i < n; i++) {
        // with double precision, agreement to roughly this tolerance should count as correct
        auto m = 7 - std::log10(n);
        if (fabs(host_result[i] - result[i]) > ::pow(10, -m)) {
            // mark as NG when the tolerance is not met
            std::cout << "ng: " << result[i] << std::endl;
            checker++;
        }
    }
    if (checker == 0) {
        std::cout << "ok" << std::endl;
    } else {
        std::cout << checker << std::endl;
    }

    // print the run time (data transfers not included?), the problem size, and the achieved performance
    auto time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
    std::cout << "n: " << n << ", nnz: " << nnz << ", threads: " << blocksize << std::endl;
    std::cout << "time: " << time << " [ms]" << std::endl;
    std::cout << "perf: " << 2 * n * n / time / 1e6 << " [Gflops/sec]" << std::endl;

    hipFree(row);
    hipFree(col);
    hipFree(val);
    hipFree(vec_x);
    hipFree(vec_y);

    return 0;
}
4aa8f760e560e9478cf5b2f89ac7bff5ce9079b3.cu
#include <iostream>
#include <memory>
#include <chrono>
#include <cmath>
#include <cstdlib>
#include <vector>

// kernel
template<typename T>
__global__ void spMulAdd(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz)
{
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    T y_val = 0.0;

    // In Ax = y, each thread computes one component of the vector y
    if (tid < n) {
        // each thread only needs to read the nonzeros of its own CSR row of A
        #pragma unroll
        for (auto j = row[tid]; j < row[tid + 1]; ++j) {
            y_val += val[j] * dx[col[j]];
        }
        dy[tid] = y_val;
        // increment the thread index until it reaches n (until every element of y is computed)
        tid += blockIdx.x * blockDim.x;
    }
}

template<typename T>
__device__ T warp_reduction(T val)
{
#define warpSize 32
    for (auto offset = warpSize / 2; offset > 0; offset /= 2) {
        val += __shfl_down_sync(0xFFFFFFFF, val, offset, warpSize);
    }
    return val;
}

template<typename T>
__global__ void spMulAdd_vector(const int * __restrict__ row, const int * __restrict__ col, const T * __restrict__ val, const T * __restrict__ dx, T * __restrict__ dy, int n, int nnz)
{
    auto tid = threadIdx.x + blockIdx.x * blockDim.x;
    auto rowid = tid / warpSize;
    auto lane = tid % warpSize;
    T y_val = 0;

    if (rowid < n) {
        for (auto i = row[rowid] + lane; i < row[rowid + 1]; i += warpSize) {
            y_val += val[i] * dx[col[i]];
        }
        y_val = warp_reduction<T>(y_val);
    }
    if (lane == 0 && rowid < n) {
        dy[rowid] = y_val;
    }
}

int main(int args, char *argv[])
{
    // n is given as a run-time argument
    int n;
    n = atoi(argv[1]);

    // build the sparse matrix
    int *row, *col;
    double *val, *vec_x, *vec_y;

    std::unique_ptr<double[]> host_a(new double[n * n]);
    for (auto i = 0; i < n * n; i++) {
        if (static_cast<double>(std::rand()) / RAND_MAX < 0.5) {
            //host_a[i] = static_cast<double>(std::rand()) / RAND_MAX;
            host_a[i] = 1;
        } else {
            host_a[i] = 0;
        }
    }

    std::unique_ptr<int[]> host_row(new int[n + 1]);
    std::vector<int> host_col;
    std::vector<double> host_val;

    auto nnz = 0;
    host_row[0] = nnz;
    for (auto i = 0; i < n; i++) {
        for (auto j = 0; j < n; j++) {
            if (host_a[i * n + j] != 0) {
                host_val.push_back(host_a[i * n + j]);
                host_col.push_back(j);
                nnz++;
            }
        }
        host_row[i + 1] = nnz;
    }

    // build the vectors x and y
    std::unique_ptr<double[]> host_x(new double[n]);
    std::unique_ptr<double[]> host_y(new double[n]);
    for (auto i = 0; i < n; i++) {
        //host_x[i] = static_cast<double>(rand()) / RAND_MAX;
        host_x[i] = 1;
        host_y[i] = 0;
    }

    // hand the data to the gpu
    cudaMalloc((void**)&row, (n + 1) * sizeof(int));
    cudaMalloc((void**)&col, nnz * sizeof(int));
    cudaMalloc((void**)&val, nnz * sizeof(double));
    cudaMalloc((void**)&vec_x, n * sizeof(double));
    cudaMalloc((void**)&vec_y, n * sizeof(double));

    cudaMemcpy(row, host_row.get(), (n + 1) * sizeof(int), cudaMemcpyHostToDevice);
    auto* p_host_col = host_col.data();
    cudaMemcpy(col, p_host_col, nnz * sizeof(int), cudaMemcpyHostToDevice);
    auto* p_host_val = host_val.data();
    cudaMemcpy(val, p_host_val, nnz * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(vec_x, host_x.get(), n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(vec_y, host_y.get(), n * sizeof(double), cudaMemcpyHostToDevice);

    // how should the thread-block size be chosen?
    auto blocksize = 32;
    dim3 block(blocksize, 1, 1);
    dim3 grid(warpSize * std::ceil(n / static_cast<double>(block.x)), 1, 1);

    // timing section; is it ok not to include the data transfers?
    std::chrono::system_clock::time_point start, end;
    start = std::chrono::system_clock::now();

    // run the computation
    spMulAdd_vector<double> <<<grid, block>>>(row, col, val, vec_x, vec_y, n, nnz);
    cudaDeviceSynchronize(); // wait for the kernel to finish before stopping the timer

    end = std::chrono::system_clock::now();

    // check the result by recomputing it on the cpu
    std::unique_ptr<double[]> result(new double[n]);
    cudaMemcpy(result.get(), vec_y, n * sizeof(double), cudaMemcpyDeviceToHost);

    std::unique_ptr<double[]> host_result(new double[n]);
    for (auto i = 0; i < n; i++) {
        host_result[i] = 0;
    }
    for (auto i = 0; i < n; i++) {
        for (auto j = 0; j < n; j++) {
            host_result[i] += host_a[i * n + j] * host_x[j];
        }
    }

    auto checker = 0;
    for (auto i = 0; i < n; i++) {
        // with double precision, agreement to roughly this tolerance should count as correct
        auto m = 7 - std::log10(n);
        if (fabs(host_result[i] - result[i]) > std::pow(10, -m)) {
            // mark as NG when the tolerance is not met
            std::cout << "ng: " << result[i] << std::endl;
            checker++;
        }
    }
    if (checker == 0) {
        std::cout << "ok" << std::endl;
    } else {
        std::cout << checker << std::endl;
    }

    // print the run time (data transfers not included?), the problem size, and the achieved performance
    auto time = static_cast<double>(std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() / 1000.0);
    std::cout << "n: " << n << ", nnz: " << nnz << ", threads: " << blocksize << std::endl;
    std::cout << "time: " << time << " [ms]" << std::endl;
    std::cout << "perf: " << 2 * n * n / time / 1e6 << " [Gflops/sec]" << std::endl;

    cudaFree(row);
    cudaFree(col);
    cudaFree(val);
    cudaFree(vec_x);
    cudaFree(vec_y);

    return 0;
}
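A brief host-side sketch of the thread mapping used by spMulAdd_vector above: one 32-lane warp is assigned to each CSR row, each lane strides over that row's nonzeros, and the partial sums are then folded together (the function names here are illustrative, not part of the file):

#include <vector>
#include <cstdio>

// Host emulation of the warp-per-row CSR SpMV mapping: lane l of the warp
// assigned to row r accumulates val[i] * x[col[i]] for i = row[r]+l,
// row[r]+l+32, ...; the final loop stands in for the __shfl_down_sync tree sum.
double row_dot(const std::vector<int>& row, const std::vector<int>& col,
               const std::vector<double>& val, const std::vector<double>& x, int r) {
    const int warp = 32;
    double lane_sum[32] = {0.0};
    for (int lane = 0; lane < warp; ++lane)
        for (int i = row[r] + lane; i < row[r + 1]; i += warp)
            lane_sum[lane] += val[i] * x[col[i]];
    double y = 0.0;
    for (int lane = 0; lane < warp; ++lane) y += lane_sum[lane];
    return y;
}

int main() {
    // 2x2 example: [[1, 2], [0, 3]] in CSR form, multiplied by x = [1, 1]
    std::vector<int> row{0, 2, 3}, col{0, 1, 1};
    std::vector<double> val{1, 2, 3}, x{1, 1};
    std::printf("y0=%g y1=%g\n", row_dot(row, col, val, x, 0), row_dot(row, col, val, x, 1)); // 3 and 3
    return 0;
}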
6833662ade8e9f33ff9df820bcf8883eb47b6a81.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <math.h> // for sqrt in CPU and CUDA #include <vector> #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/adam_functors.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/selected_rows/adamw_kernel.h" namespace phi { namespace sr { template <typename T> __global__ void UpdateAdamWBetaPow(T beta1, T beta2, const T* beta1_pow_, const T* beta2_pow_, T* beta1_pow_out, T* beta2_pow_out) { *beta1_pow_out = beta1 * beta1_pow_[0]; *beta2_pow_out = beta2 * beta2_pow_[0]; } template <typename T, typename MT> __global__ void SparseAdamWCUDAKernelREG(MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT beta1_pow, const MT beta2_pow, const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_, const MT* lr_, const T* grad_, const T* param_, T* param_out_, const MT* master_param, MT* master_param_out, const int64_t* rows_, int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) { int id = blockIdx.x * blockDim.x + threadIdx.x; MT lr = *lr_ * lr_ratio; for (; id < ndim; id += blockDim.x * gridDim.x) { auto row_idx = phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel); if (lazy_mode && row_idx < 0) { return; } else { MT mom1 = static_cast<MT>(mom1_[id]); MT mom2 = static_cast<MT>(mom2_[id]); MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]); MT g = row_idx >= 0 ? 
static_cast<MT>(grad_[row_idx * row_numel + id % row_numel]) : static_cast<MT>(0); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); // Write back to global memory mom1_out_[id] = mom1; mom2_out_[id] = mom2; param_out_[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } } template <typename T, typename Context> void AdamwDenseParamSparseGradKernel( const Context& dev_ctx, const DenseTensor& param, const SelectedRows& grad, const DenseTensor& learning_rate, const DenseTensor& moment1, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, const paddle::optional<DenseTensor>& master_param, const paddle::optional<DenseTensor>& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, float lr_ratio, float coeff, bool with_decay, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow, DenseTensor* param_out, DenseTensor* moment1_out, DenseTensor* moment2_out, DenseTensor* beta1_pow_out, DenseTensor* beta2_pow_out, DenseTensor* master_param_outs) { using MPDType = typename phi::dtype::MPTypeTrait<T>::Type; VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; MPDType coeff_ = static_cast<MPDType>(coeff); MPDType lr_ratio_ = static_cast<MPDType>(lr_ratio); bool skip_update_ = false; if (skip_update.is_initialized()) { PADDLE_ENFORCE_EQ( skip_update->numel(), 1, errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d", skip_update->numel())); std::vector<bool> skip_update_vec; paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec); skip_update_ = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update_) { VLOG(4) << "Adamw skip update"; phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out); phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out); phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out); phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out); phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out); return; } // if with_decay = false, coeff = 0 if (!with_decay) { coeff_ = static_cast<MPDType>(0.0); } MPDType beta1_ = beta1.to<MPDType>(); MPDType beta2_ = beta2.to<MPDType>(); MPDType epsilon_ = epsilon.to<MPDType>(); VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel() << "beta2_pow.numel() : " << beta2_pow.numel(); VLOG(3) << "param.numel(): " << param.numel(); PADDLE_ENFORCE_EQ( beta1_pow_out->numel(), 1, errors::InvalidArgument("beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ( beta2_pow_out->numel(), 1, errors::InvalidArgument("beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); const MPDType* master_in_data = multi_precision ? master_param->data<MPDType>() : nullptr; MPDType* master_out_data = multi_precision ? 
dev_ctx.template Alloc<MPDType>(master_param_outs) : nullptr; if (grad.rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } phi::SelectedRows tmp_grad_merge; const phi::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = &grad; } else { // merge duplicated rows if any. // The rows of grad_merge have been sorted inside MergeAdd functor paddle::operators::math::scatter::MergeAdd<Context, T> merge_func; merge_func(dev_ctx, grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); auto* grad_merge_rows = &grad_merge.rows(); paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows); const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) { int threads = 512; int ndim = param.numel(); int blocks = (ndim + threads - 1) / threads; hipLaunchKernelGGL(( SparseAdamWCUDAKernelREG<T, MPDType>) , dim3(blocks), dim3(threads), 0, dev_ctx.stream(), beta1_, beta2_, epsilon_, coeff_, lr_ratio_, *beta1_pow.data<MPDType>(), *beta2_pow.data<MPDType>(), moment1.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment1_out), moment2.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment2_out), learning_rate.data<MPDType>(), grad_data, param.data<T>(), dev_ctx.template Alloc<T>(param_out), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode, ndim); if (!use_global_beta_pow) { // Update with cpu dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] = beta1_ * beta1_pow.data<MPDType>()[0]; dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] = beta2_ * beta2_pow.data<MPDType>()[0]; } } else { funcs::SparseAdamWFunctor<T, funcs::GPUAdamW, MPDType> functor( beta1_, beta2_, epsilon_, coeff_, lr_ratio_, beta1_pow.data<MPDType>(), beta2_pow.data<MPDType>(), moment1.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment1_out), moment2.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment2_out), learning_rate.data<MPDType>(), grad_data, param.data<T>(), dev_ctx.template Alloc<T>(param_out), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode); // FIXME(minqiyang): remove BinarySearch in GPU later funcs::ForRange<Context> for_range(dev_ctx, param.numel()); for_range(functor); if (!use_global_beta_pow) { // update beta1 and beta2 hipLaunchKernelGGL(( UpdateAdamWBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(), beta1_, beta2_, beta1_pow.data<MPDType>(), beta2_pow.data<MPDType>(), dev_ctx.template Alloc<MPDType>(beta1_pow_out), dev_ctx.template Alloc<MPDType>(beta2_pow_out)); } } } } // namespace sr } // namespace phi PD_REGISTER_KERNEL(adamw_dense_param_sparse_grad, GPU, ALL_LAYOUT, phi::sr::AdamwDenseParamSparseGradKernel, float, double, phi::dtype::float16) { // Skip beta1_pow, beta2_pow, skip_update data transform kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND); }
6833662ade8e9f33ff9df820bcf8883eb47b6a81.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <math.h> // for sqrt in CPU and CUDA #include <vector> #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/amp_type_traits.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/copy_kernel.h" #include "paddle/phi/kernels/funcs/adam_functors.h" #include "paddle/phi/kernels/funcs/for_range.h" #include "paddle/phi/kernels/selected_rows/adamw_kernel.h" namespace phi { namespace sr { template <typename T> __global__ void UpdateAdamWBetaPow(T beta1, T beta2, const T* beta1_pow_, const T* beta2_pow_, T* beta1_pow_out, T* beta2_pow_out) { *beta1_pow_out = beta1 * beta1_pow_[0]; *beta2_pow_out = beta2 * beta2_pow_[0]; } template <typename T, typename MT> __global__ void SparseAdamWCUDAKernelREG(MT beta1, MT beta2, MT epsilon, MT coeff, MT lr_ratio, const MT beta1_pow, const MT beta2_pow, const MT* mom1_, MT* mom1_out_, const MT* mom2_, MT* mom2_out_, const MT* lr_, const T* grad_, const T* param_, T* param_out_, const MT* master_param, MT* master_param_out, const int64_t* rows_, int64_t row_numel, int64_t row_count, bool lazy_mode, int ndim) { int id = blockIdx.x * blockDim.x + threadIdx.x; MT lr = *lr_ * lr_ratio; for (; id < ndim; id += blockDim.x * gridDim.x) { auto row_idx = phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel); if (lazy_mode && row_idx < 0) { return; } else { MT mom1 = static_cast<MT>(mom1_[id]); MT mom2 = static_cast<MT>(mom2_[id]); MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]); MT g = row_idx >= 0 ? 
static_cast<MT>(grad_[row_idx * row_numel + id % row_numel]) : static_cast<MT>(0); p *= (static_cast<MT>(1.0) - lr * coeff); mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g; mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g; MT denom = (sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon; p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow))); // Write back to global memory mom1_out_[id] = mom1; mom2_out_[id] = mom2; param_out_[id] = static_cast<T>(p); if (master_param_out) { master_param_out[id] = p; } } } } template <typename T, typename Context> void AdamwDenseParamSparseGradKernel( const Context& dev_ctx, const DenseTensor& param, const SelectedRows& grad, const DenseTensor& learning_rate, const DenseTensor& moment1, const DenseTensor& moment2, const DenseTensor& beta1_pow, const DenseTensor& beta2_pow, const paddle::optional<DenseTensor>& master_param, const paddle::optional<DenseTensor>& skip_update, const Scalar& beta1, const Scalar& beta2, const Scalar& epsilon, float lr_ratio, float coeff, bool with_decay, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow, DenseTensor* param_out, DenseTensor* moment1_out, DenseTensor* moment2_out, DenseTensor* beta1_pow_out, DenseTensor* beta2_pow_out, DenseTensor* master_param_outs) { using MPDType = typename phi::dtype::MPTypeTrait<T>::Type; VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow; MPDType coeff_ = static_cast<MPDType>(coeff); MPDType lr_ratio_ = static_cast<MPDType>(lr_ratio); bool skip_update_ = false; if (skip_update.is_initialized()) { PADDLE_ENFORCE_EQ( skip_update->numel(), 1, errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d", skip_update->numel())); std::vector<bool> skip_update_vec; paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec); skip_update_ = skip_update_vec[0]; } // skip_update=true, just copy input to output, and TensorCopy will call // mutable_data if (skip_update_) { VLOG(4) << "Adamw skip update"; phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out); phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out); phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out); phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out); phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out); return; } // if with_decay = false, coeff = 0 if (!with_decay) { coeff_ = static_cast<MPDType>(0.0); } MPDType beta1_ = beta1.to<MPDType>(); MPDType beta2_ = beta2.to<MPDType>(); MPDType epsilon_ = epsilon.to<MPDType>(); VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel() << "beta2_pow.numel() : " << beta2_pow.numel(); VLOG(3) << "param.numel(): " << param.numel(); PADDLE_ENFORCE_EQ( beta1_pow_out->numel(), 1, errors::InvalidArgument("beta1 pow output size should be 1, but received " "value is:%d.", beta1_pow_out->numel())); PADDLE_ENFORCE_EQ( beta2_pow_out->numel(), 1, errors::InvalidArgument("beta2 pow output size should be 1, but received " "value is:%d.", beta2_pow_out->numel())); const MPDType* master_in_data = multi_precision ? master_param->data<MPDType>() : nullptr; MPDType* master_out_data = multi_precision ? 
dev_ctx.template Alloc<MPDType>(master_param_outs) : nullptr; if (grad.rows().size() == 0) { VLOG(3) << "grad row size is 0!!"; return; } std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end()); bool is_strict_sorted = true; for (size_t i = 1; i < cpu_rows.size(); ++i) { if (cpu_rows[i - 1] >= cpu_rows[i]) { is_strict_sorted = false; break; } } phi::SelectedRows tmp_grad_merge; const phi::SelectedRows* grad_merge_ptr; if (is_strict_sorted) { grad_merge_ptr = &grad; } else { // merge duplicated rows if any. // The rows of grad_merge have been sorted inside MergeAdd functor paddle::operators::math::scatter::MergeAdd<Context, T> merge_func; merge_func(dev_ctx, grad, &tmp_grad_merge, true); grad_merge_ptr = &tmp_grad_merge; } auto& grad_merge = *grad_merge_ptr; auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data<T>(); auto* grad_merge_rows = &grad_merge.rows(); paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows); const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace()); auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) { int threads = 512; int ndim = param.numel(); int blocks = (ndim + threads - 1) / threads; SparseAdamWCUDAKernelREG<T, MPDType> <<<blocks, threads, 0, dev_ctx.stream()>>>( beta1_, beta2_, epsilon_, coeff_, lr_ratio_, *beta1_pow.data<MPDType>(), *beta2_pow.data<MPDType>(), moment1.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment1_out), moment2.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment2_out), learning_rate.data<MPDType>(), grad_data, param.data<T>(), dev_ctx.template Alloc<T>(param_out), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode, ndim); if (!use_global_beta_pow) { // Update with cpu dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] = beta1_ * beta1_pow.data<MPDType>()[0]; dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] = beta2_ * beta2_pow.data<MPDType>()[0]; } } else { funcs::SparseAdamWFunctor<T, funcs::GPUAdamW, MPDType> functor( beta1_, beta2_, epsilon_, coeff_, lr_ratio_, beta1_pow.data<MPDType>(), beta2_pow.data<MPDType>(), moment1.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment1_out), moment2.data<MPDType>(), dev_ctx.template Alloc<MPDType>(moment2_out), learning_rate.data<MPDType>(), grad_data, param.data<T>(), dev_ctx.template Alloc<T>(param_out), master_in_data, master_out_data, rows, row_numel, grad_merge.rows().size(), lazy_mode); // FIXME(minqiyang): remove BinarySearch in GPU later funcs::ForRange<Context> for_range(dev_ctx, param.numel()); for_range(functor); if (!use_global_beta_pow) { // update beta1 and beta2 UpdateAdamWBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>( beta1_, beta2_, beta1_pow.data<MPDType>(), beta2_pow.data<MPDType>(), dev_ctx.template Alloc<MPDType>(beta1_pow_out), dev_ctx.template Alloc<MPDType>(beta2_pow_out)); } } } } // namespace sr } // namespace phi PD_REGISTER_KERNEL(adamw_dense_param_sparse_grad, GPU, ALL_LAYOUT, phi::sr::AdamwDenseParamSparseGradKernel, float, double, phi::dtype::float16) { // Skip beta1_pow, beta2_pow, skip_update data transform kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND); kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND); }
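The per-element arithmetic inside SparseAdamWCUDAKernelREG is easier to read in scalar form; a minimal single-parameter sketch that mirrors the update above (decoupled weight decay first, then the bias-corrected Adam step), with illustrative names and hyperparameter values:

#include <cmath>
#include <cstdio>

// One AdamW step for a single parameter, following the kernel's formulas;
// lr here stands for the product (*lr_) * lr_ratio used in the kernel.
void adamw_step(double& p, double& m1, double& m2, double g,
                double lr, double coeff, double beta1, double beta2,
                double beta1_pow, double beta2_pow, double eps) {
    p *= 1.0 - lr * coeff;                                  // decoupled weight decay
    m1 = beta1 * m1 + (1.0 - beta1) * g;                    // first moment
    m2 = beta2 * m2 + (1.0 - beta2) * g * g;                // second moment
    double denom = std::sqrt(m2) / std::sqrt(1.0 - beta2_pow) + eps;
    p += (m1 / denom) * (-(lr / (1.0 - beta1_pow)));        // bias-corrected update
}

int main() {
    double p = 1.0, m1 = 0.0, m2 = 0.0;
    adamw_step(p, m1, m2, /*g=*/0.1, /*lr=*/1e-3, /*coeff=*/0.01,
               /*beta1=*/0.9, /*beta2=*/0.999, /*beta1_pow=*/0.9, /*beta2_pow=*/0.999, 1e-8);
    std::printf("p=%.6f m1=%.6f m2=%.6f\n", p, m1, m2);
    return 0;
}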
e06780f9cec8476e405bdbba3accdd7df2383e0e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:45 2012 */ #include "common_magma.h" /* Matrix is divided into 64 x n block rows. Each block has 64 threads. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (row >= m) are disabled. @author Mark Gates */ __global__ void slacpy_kernel( int m, int n, const float *A, int lda, float *B, int ldb ) { int row = blockIdx.x*64 + threadIdx.x; if ( row < m ) { A += row; B += row; const float *Aend = A + lda*n; while( A < Aend ) { *B = *A; A += lda; B += ldb; } } } extern "C" void magmablas_slacpy( char uplo, magma_int_t m, magma_int_t n, const float *A, magma_int_t lda, float *B, magma_int_t ldb ) { /* Note ======== - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ======= SLACPY copies all or part of a two-dimensional matrix A to another matrix B. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be copied to B. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of the matrix A M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input) COMPLEX DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (output) COMPLEX DOUBLE PRECISION array, dimension (LDB,N) On exit, B = A in the locations specified by UPLO. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,M). ===================================================================== */ dim3 threads( 64 ); dim3 grid( m/64 + (m%64 != 0) ); //printf( "m %d, n %d, grid %d, threads %d\n", m, n, grid.x, threads.x ); if ( m == 0 || n == 0 ) return; if ( (uplo == 'U') || (uplo == 'u') ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( (uplo == 'L') || (uplo == 'l') ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { hipLaunchKernelGGL(( slacpy_kernel), dim3(grid), dim3(threads), 0, magma_stream , m, n, A, lda, B, ldb ); } }
e06780f9cec8476e405bdbba3accdd7df2383e0e.cu
/* -- MAGMA (version 1.3.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver November 2012 @generated s Wed Nov 14 22:53:45 2012 */ #include "common_magma.h" /* Matrix is divided into 64 x n block rows. Each block has 64 threads. Each thread copies one row, iterating across all columns. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (row >= m) are disabled. @author Mark Gates */ __global__ void slacpy_kernel( int m, int n, const float *A, int lda, float *B, int ldb ) { int row = blockIdx.x*64 + threadIdx.x; if ( row < m ) { A += row; B += row; const float *Aend = A + lda*n; while( A < Aend ) { *B = *A; A += lda; B += ldb; } } } extern "C" void magmablas_slacpy( char uplo, magma_int_t m, magma_int_t n, const float *A, magma_int_t lda, float *B, magma_int_t ldb ) { /* Note ======== - UPLO Parameter is disabled - Do we want to provide a generic function to the user with all the options? Purpose ======= SLACPY copies all or part of a two-dimensional matrix A to another matrix B. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix A to be copied to B. = 'U': Upper triangular part = 'L': Lower triangular part Otherwise: All of the matrix A M (input) INTEGER The number of rows of the matrix A. M >= 0. N (input) INTEGER The number of columns of the matrix A. N >= 0. A (input) COMPLEX DOUBLE PRECISION array, dimension (LDA,N) The m by n matrix A. If UPLO = 'U', only the upper triangle or trapezoid is accessed; if UPLO = 'L', only the lower triangle or trapezoid is accessed. LDA (input) INTEGER The leading dimension of the array A. LDA >= max(1,M). B (output) COMPLEX DOUBLE PRECISION array, dimension (LDB,N) On exit, B = A in the locations specified by UPLO. LDB (input) INTEGER The leading dimension of the array B. LDB >= max(1,M). ===================================================================== */ dim3 threads( 64 ); dim3 grid( m/64 + (m%64 != 0) ); //printf( "m %d, n %d, grid %d, threads %d\n", m, n, grid.x, threads.x ); if ( m == 0 || n == 0 ) return; if ( (uplo == 'U') || (uplo == 'u') ) { fprintf(stderr, "lacpy upper is not implemented\n"); } else if ( (uplo == 'L') || (uplo == 'l') ) { fprintf(stderr, "lacpy lower is not implemented\n"); } else { slacpy_kernel<<< grid, threads, 0, magma_stream >>> ( m, n, A, lda, B, ldb ); } }
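slacpy_kernel assigns one thread per matrix row (64 rows per block) and walks across all n columns of the column-major storage; a small host reference of the same copy for the "whole matrix" path, with illustrative sizes:

#include <vector>
#include <cassert>

// Host reference of what slacpy_kernel does when neither triangle is selected:
// B(i,j) = A(i,j) for an m x n matrix stored column-major with leading
// dimensions lda and ldb (both >= m).
void slacpy_ref(int m, int n, const std::vector<float>& A, int lda,
                std::vector<float>& B, int ldb) {
    for (int i = 0; i < m; ++i)            // one GPU thread per row i
        for (int j = 0; j < n; ++j)        // each thread walks the columns
            B[i + j * ldb] = A[i + j * lda];
}

int main() {
    const int m = 3, n = 2, lda = 4, ldb = 3;   // padded A, tightly packed B
    std::vector<float> A(lda * n, 7.0f), B(ldb * n, 0.0f);
    slacpy_ref(m, n, A, lda, B, ldb);
    assert(B[0 + 1 * ldb] == 7.0f);
    // Grid sizing used by the launcher: ceil(m/64) blocks of 64 threads.
    assert((m / 64 + (m % 64 != 0)) == 1);
    return 0;
}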
3d2f06411be5c4d97b13c8efaf38ff66da6bbcee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt2816(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2817(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2818(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2819(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2820(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2821(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2822(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2823(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2824(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2825(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2826(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2827(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2828(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2829(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2830(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2831(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2832(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2833(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2834(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2835(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2836(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2837(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2838(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 
8, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2839(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2840(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2841(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2842(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2843(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2844(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2845(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2846(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2847(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2848(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2849(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2850(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2851(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2852(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2853(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2854(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2855(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2856(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2857(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2858(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2859(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2860(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2861(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 
8, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2862(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2863(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2864(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2865(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2866(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2867(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2868(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2869(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2870(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2871(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2872(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2873(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2874(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2875(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2876(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2877(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2878(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2879(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2880(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2881(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2882(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2883(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2884(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 
8, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2885(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2886(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2887(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2888(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2889(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2890(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2891(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2892(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2893(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2894(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2895(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2896(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2897(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2898(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2899(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2900(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2901(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2902(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2903(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2904(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2905(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2906(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2907(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 
8, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2908(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2909(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2910(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2911(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2912(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2913(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2914(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2915(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2916(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2917(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2918(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2919(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2920(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2921(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2922(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, 
unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2923(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2924(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2925(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2926(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2927(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2928(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2929(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2930(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 
8, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2931(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2932(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2933(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2934(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2935(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2936(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2937(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2938(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const 
vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2939(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2940(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2941(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2942(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2943(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { hipLaunchKernelGGL(( des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
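/*
 * Illustrative sketch, not part of the generated file: every
 * des_25_encrypt_saltNNNN wrapper above only forwards its arguments to the
 * des_25_encrypt kernel template specialized for one salt value, so a host
 * caller normally selects the wrapper that matches the active salt. The
 * dispatcher below is a minimal example of that pattern, guarded by the same
 * DESGPU_COMPILE_ALL_SALTS macro and calling only wrappers defined in this
 * file; the function name and the handful of salts it covers are illustrative
 * assumptions, not taken from the original code.
 */
#ifdef DESGPU_COMPILE_ALL_SALTS
static void des_25_encrypt_for_salt_example(const unsigned int salt,
                                            const size_t num_blocks,
                                            const size_t threads_per_block,
                                            vtype* const unchecked_hashes,
                                            const vtype* const bitsplitted_keys)
{
	/* Forward to the wrapper generated for this salt value. */
	switch (salt) {
	case 2854: des_25_encrypt_salt2854(num_blocks, threads_per_block, unchecked_hashes, bitsplitted_keys); break;
	case 2855: des_25_encrypt_salt2855(num_blocks, threads_per_block, unchecked_hashes, bitsplitted_keys); break;
	case 2942: des_25_encrypt_salt2942(num_blocks, threads_per_block, unchecked_hashes, bitsplitted_keys); break;
	case 2943: des_25_encrypt_salt2943(num_blocks, threads_per_block, unchecked_hashes, bitsplitted_keys); break;
	default: break; /* remaining salts omitted from this sketch */
	}
}
#endif /* DESGPU_COMPILE_ALL_SALTS */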
3d2f06411be5c4d97b13c8efaf38ff66da6bbcee.cu
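/*
 * This .cu listing is the plain-CUDA counterpart of the hipified wrappers
 * above. A launch written here as
 *     des_25_encrypt<...><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
 * corresponds to the HIP form
 *     hipLaunchKernelGGL((des_25_encrypt<...>), dim3(num_blocks), dim3(threads_per_block), 0, 0, unchecked_hashes, bitsplitted_keys);
 * where the two zeros are the dynamic shared-memory size in bytes and the
 * stream (the default stream).
 */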
#include "des_kernel_encrypt.h" #include "des_kernel_salt_instances.h" #ifdef DESGPU_COMPILE_ALL_SALTS void des_25_encrypt_salt2816(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2817(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2818(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2819(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2820(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2821(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2822(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2823(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2824(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2825(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2826(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2827(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2828(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2829(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2830(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2831(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2832(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 
4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2833(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2834(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2835(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2836(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2837(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2838(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2839(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2840(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 51, 
52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2841(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2842(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2843(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2844(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2845(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2846(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2847(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2848(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2849(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2850(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2851(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2852(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2853(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2854(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2855(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2856(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2857(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 
20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2858(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2859(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2860(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2861(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2862(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2863(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2864(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2865(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 51, 
52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2866(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2867(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2868(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2869(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2870(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2871(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2872(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2873(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2874(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2875(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2876(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2877(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2878(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2879(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 3, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 19, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 35, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 51, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2880(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2881(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2882(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 
4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2883(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2884(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2885(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2886(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2887(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2888(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2889(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2890(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 52, 35, 
52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2891(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2892(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2893(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2894(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2895(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2896(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2897(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2898(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2899(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2900(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2901(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2902(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2903(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2904(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2905(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2906(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2907(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 
19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2908(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2909(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2910(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2911(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 4, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 20, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 36, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 52, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2912(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2913(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2914(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2915(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 51, 36, 35, 
52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2916(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2917(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2918(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2919(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2920(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2921(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2922(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2923(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2924(const size_t 
num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2925(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2926(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2927(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 3, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 19, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 35, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 51, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2928(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2929(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2930(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2931(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2932(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 2, 19, 
20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2933(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2934(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2935(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 2, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 18, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 34, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 50, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2936(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2937(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2938(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2939(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 1, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 17, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 33, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 49, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2940(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 48, 33, 34, 35, 36, 35, 52, 
37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2941(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 0, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 16, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 32, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 48, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2942(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<31, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 15, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 63, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 47, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } void des_25_encrypt_salt2943(const size_t num_blocks, const size_t threads_per_block, vtype* const unchecked_hashes, const vtype* const bitsplitted_keys) { des_25_encrypt<15, 16, 17, 18, 19, 20, 19, 4, 21, 22, 7, 24, 31, 0, 1, 2, 3, 4, 3, 20, 5, 6, 23, 8, 47, 48, 49, 50, 51, 52, 51, 36, 53, 54, 39, 56, 63, 32, 33, 34, 35, 36, 35, 52, 37, 38, 55, 40><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys); } #endif // DESGPU_COMPILE_ALL_SALTS
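Editorial note: the functions above are the tail of a machine-generated family of launch wrappers. Each des_25_encrypt_saltNNNN instantiates the des_25_encrypt kernel template with a salt-specific permutation of 48 compile-time indices and simply forwards the grid configuration. As an illustration only, the sketch below shows that dispatch pattern with a hypothetical two-index template and a stand-in vtype_sketch type; the real kernel, its 48 parameters, and its body are not reproduced here.

#include <cstddef>

// Editorial sketch of the wrapper pattern above; vtype_sketch and the
// two-index template are stand-ins, not the real DES kernel.
typedef unsigned long long vtype_sketch;

template <int I0, int I1>
__global__ void des_25_encrypt_sketch(vtype_sketch* unchecked_hashes,
                                      const vtype_sketch* bitsplitted_keys)
{
    // A real instantiation would run the 25 DES iterations with the salt-dependent
    // expansion indices fixed at compile time; here we only record the two stand-ins.
    if (blockIdx.x == 0 && threadIdx.x == 0)
        unchecked_hashes[0] = bitsplitted_keys[0] ^ (((vtype_sketch)I0 << 8) | (vtype_sketch)I1);
}

// One thin host wrapper per salt value, mirroring des_25_encrypt_saltNNNN above:
// the salt is baked into the template arguments, so no runtime salt lookup is needed.
void des_25_encrypt_salt_sketch(const size_t num_blocks, const size_t threads_per_block,
                                vtype_sketch* const unchecked_hashes,
                                const vtype_sketch* const bitsplitted_keys)
{
    des_25_encrypt_sketch<31, 0><<<num_blocks, threads_per_block>>>(unchecked_hashes, bitsplitted_keys);
}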
c92c8acbc2d8cf8c9d934c75628dcacc1f642029.hip
// !!! This is a file automatically generated by hipify!!!
// Reduction - Workshop 7
// w7_2.cu

#include <iostream>
#include <cstdlib>
#include <ctime>
#include <iomanip>
// CUDA header file
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#ifndef __HIPCC__
#define __HIPCC__
#endif
#include <hip/device_functions.h>

const int ntpb = 1024; // number of threads per block

void init(float* a, int n, bool debug) {
    float f = 1.0f / RAND_MAX;
    for (int i = 0; i < n; i++)
        if (debug)
            a[i] = 1.0f;
        else
            a[i] = std::rand() * f; // [0.0f 1.0f]
}

// kernel 1 - product
__global__ void product(float * a, float * b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        a[i] = a[i] * b[i];
    }
}

// kernel 2 - reduction on a single block
__global__ void reduction(float * a, float * c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int t = threadIdx.x;
    __shared__ float sharedMem[ntpb];
    sharedMem[t] = a[i];
    __syncthreads();
    for (int stride = 1; stride < blockDim.x; stride <<= 1) {
        if (t % (stride * 2) == 0)
            sharedMem[t] += sharedMem[t + stride];
        __syncthreads();
    }
    if (t == 0)
        c[blockIdx.x] = sharedMem[0];
}

int main(int argc, char** argv) {
    // interpret command-line arguments
    if (argc != 2 && argc != 3) {
        std::cerr << argv[0] << ": invalid number of arguments\n";
        std::cerr << "Usage: " << argv[0] << " size_of_vectors\n";
        return 1;
    }
    int n = atoi(argv[1]);
    bool debug = argc == 3;
    std::srand((unsigned)time(nullptr));

    // calculate required number of blocks
    int nblks = (n + ntpb - 1) / ntpb;

    // host vectors
    float* h_a = new float[ntpb * nblks];
    float* h_b = new float[ntpb * nblks];
    init(h_a, n, debug);
    init(h_b, n, debug);
    for (int i = n; i < nblks * ntpb; i++) {
        h_a[i] = 0.0f;
        h_b[i] = 0.0f;
    }

    // dot product on the host
    float h_h = 0.f;
    for (int i = 0; i < n; i++) {
        h_h += h_a[i] * h_b[i];
    }

    // allocate device vectors (d_a[nblks * ntpb], d_b[n], d_c[nblks])
    float* d_a;
    float* d_b;
    float* d_c;
    hipMalloc((void **)&d_a, nblks * ntpb * sizeof(float));
    hipMalloc((void **)&d_b, nblks * ntpb * sizeof(float));
    hipMalloc((void **)&d_c, nblks * sizeof(float));

    // copy from the host to the device h_a -> d_a, h_b -> d_b
    hipMemcpy(d_a, h_a, nblks * ntpb * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, nblks * ntpb * sizeof(float), hipMemcpyHostToDevice);

    // Kernel 1 - product
    product<<<nblks, ntpb>>>(d_a, d_b, n);
    hipDeviceSynchronize();

    // Kernel 2 - reduction to one value per block
    reduction<<<nblks, ntpb>>>(d_a, d_c, n);
    hipDeviceSynchronize();

    // intermediate debugging output
    if (debug) {
        float* h_c = new float[nblks];
        hipMemcpy(h_c, d_c, nblks * sizeof(float), hipMemcpyDeviceToHost);
        for (int i = 0; i < nblks; i++)
            std::cout << h_c[i] << ' ';
        std::cout << std::endl;
        delete[] h_c;
    }

    // reduction of block values to a single value
    reduction<<<nblks, ntpb>>>(d_c, d_a, n);
    hipDeviceSynchronize();

    // copy final result from device to host - from d_a to h_c
    float h_c;
    hipMemcpy(&h_c, d_a, sizeof(float), hipMemcpyDeviceToHost);

    // report your results
    std::cout << std::fixed << std::setprecision(3);
    std::cout << "Device = " << h_c << "\nHost = " << h_h << "\n\n";

    // free device memory
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    // free host memory
    delete[] h_a;
    delete[] h_b;
}
c92c8acbc2d8cf8c9d934c75628dcacc1f642029.cu
// Reduction - Workshop 7
// w7_2.cu

#include <iostream>
#include <cstdlib>
#include <ctime>
#include <iomanip>
// CUDA header file
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef __CUDACC__
#define __CUDACC__
#endif
#include <device_functions.h>

const int ntpb = 1024; // number of threads per block

void init(float* a, int n, bool debug) {
    float f = 1.0f / RAND_MAX;
    for (int i = 0; i < n; i++)
        if (debug)
            a[i] = 1.0f;
        else
            a[i] = std::rand() * f; // [0.0f 1.0f]
}

// kernel 1 - product
__global__ void product(float * a, float * b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        a[i] = a[i] * b[i];
    }
}

// kernel 2 - reduction on a single block
__global__ void reduction(float * a, float * c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int t = threadIdx.x;
    __shared__ float sharedMem[ntpb];
    sharedMem[t] = a[i];
    __syncthreads();
    for (int stride = 1; stride < blockDim.x; stride <<= 1) {
        if (t % (stride * 2) == 0)
            sharedMem[t] += sharedMem[t + stride];
        __syncthreads();
    }
    if (t == 0)
        c[blockIdx.x] = sharedMem[0];
}

int main(int argc, char** argv) {
    // interpret command-line arguments
    if (argc != 2 && argc != 3) {
        std::cerr << argv[0] << ": invalid number of arguments\n";
        std::cerr << "Usage: " << argv[0] << " size_of_vectors\n";
        return 1;
    }
    int n = atoi(argv[1]);
    bool debug = argc == 3;
    std::srand((unsigned)time(nullptr));

    // calculate required number of blocks
    int nblks = (n + ntpb - 1) / ntpb;

    // host vectors
    float* h_a = new float[ntpb * nblks];
    float* h_b = new float[ntpb * nblks];
    init(h_a, n, debug);
    init(h_b, n, debug);
    for (int i = n; i < nblks * ntpb; i++) {
        h_a[i] = 0.0f;
        h_b[i] = 0.0f;
    }

    // dot product on the host
    float h_h = 0.f;
    for (int i = 0; i < n; i++) {
        h_h += h_a[i] * h_b[i];
    }

    // allocate device vectors (d_a[nblks * ntpb], d_b[n], d_c[nblks])
    float* d_a;
    float* d_b;
    float* d_c;
    cudaMalloc((void **)&d_a, nblks * ntpb * sizeof(float));
    cudaMalloc((void **)&d_b, nblks * ntpb * sizeof(float));
    cudaMalloc((void **)&d_c, nblks * sizeof(float));

    // copy from the host to the device h_a -> d_a, h_b -> d_b
    cudaMemcpy(d_a, h_a, nblks * ntpb * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nblks * ntpb * sizeof(float), cudaMemcpyHostToDevice);

    // Kernel 1 - product
    product<<<nblks, ntpb>>>(d_a, d_b, n);
    cudaDeviceSynchronize();

    // Kernel 2 - reduction to one value per block
    reduction<<<nblks, ntpb>>>(d_a, d_c, n);
    cudaDeviceSynchronize();

    // intermediate debugging output
    if (debug) {
        float* h_c = new float[nblks];
        cudaMemcpy(h_c, d_c, nblks * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < nblks; i++)
            std::cout << h_c[i] << ' ';
        std::cout << std::endl;
        delete[] h_c;
    }

    // reduction of block values to a single value
    reduction<<<nblks, ntpb>>>(d_c, d_a, n);
    cudaDeviceSynchronize();

    // copy final result from device to host - from d_a to h_c
    float h_c;
    cudaMemcpy(&h_c, d_a, sizeof(float), cudaMemcpyDeviceToHost);

    // report your results
    std::cout << std::fixed << std::setprecision(3);
    std::cout << "Device = " << h_c << "\nHost = " << h_h << "\n\n";

    // free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // free host memory
    delete[] h_a;
    delete[] h_b;
}
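Editorial note on the pair above: the final pass reuses reduction with nblks blocks and the original n, so threads load d_c[i] for indices well past the nblks partial sums that d_c actually holds, and the kernel has no bounds check on the shared-memory load. The sketch below is a suggestion only (finalReduction and d_result are hypothetical names, and it assumes nblks <= ntpb); it shows one common way to make that last pass safe with a guarded single-block launch.

// Hypothetical guarded final pass: one block folds the nblks partial sums in d_c.
__global__ void finalReduction(const float* partial, float* out, int m)
{
    __shared__ float s[1024];            // matches ntpb in the files above
    int t = threadIdx.x;
    s[t] = (t < m) ? partial[t] : 0.0f;  // zero-pad past the last partial sum
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride)
            s[t] += s[t + stride];
        __syncthreads();
    }
    if (t == 0)
        *out = s[0];
}

// Possible call site, replacing the second reduction<<<nblks, ntpb>>>(d_c, d_a, n):
//   finalReduction<<<1, 1024>>>(d_c, d_result, nblks);
//   cudaMemcpy(&h_c, d_result, sizeof(float), cudaMemcpyDeviceToHost);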
b3571414920fbf7d8f6c49aaa10d35c68b63ba3a.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathPointwise.hip" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor 
*self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(hipGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) 
IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, THCNumerics<scalar_t>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } void THCTensor_(digamma)(THCState* state, THCTensor* 
self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ != src) { THCTensor_(resizeAs)(state, self_, src); } if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(hipGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ != src) { THCTensor_(resizeAs)(state, self_, src); } switch (n) { case 0: if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; case 1: if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; default: THError("polygamma(n,x) is not implemented for n>=2"); } THCudaCheck(hipGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { #if defined(THC_REAL_IS_BYTE) || defined(THC_REAL_IS_CHAR) || defined(THC_REAL_IS_SHORT) || defined(THC_REAL_IS_INT) || defined(THC_REAL_IS_LONG) if (THCNumerics<scalar_t>::lt(value, ScalarConvert<int, scalar_t>::to(0))) { THError("Integers to negative integer powers are not allowed."); } #endif THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (THCNumerics<scalar_t>::eq(value, 
ScalarConvert<int, scalar_t>::to(1))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src); if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(hipGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), 
at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(hipGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
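Editorial note: the THCTensorMathPointwise pair above repeats one idiom throughout; when the destination tensor aliases the source, a one-tensor pointwise apply mutates it in place, otherwise the destination is resized like the source and a two-tensor apply writes the transformed copy into it. The sketch below is a rough illustration only, in plain C++: Tensor, resize_as, apply1, and apply2 are stand-ins for the THC types and THC_pointwiseApply1/2, and none of the THC error-checking macros are reproduced.

#include <functional>

// Editorial sketch of the in-place vs. out-of-place dispatch used in the file above.
struct Tensor { /* storage omitted in this sketch */ };

void resize_as(Tensor& dst, const Tensor& src) { /* allocate dst with src's shape */ }
void apply1(Tensor& t, const std::function<void(float&)>& op) { /* op over every element of t */ }
void apply2(Tensor& dst, const Tensor& src,
            const std::function<void(float&, float)>& op) { /* op(dst[i], src[i]) elementwise */ }

// Mirrors e.g. THCTensor_(clamp): mutate in place when self aliases src,
// otherwise resize self and write the transformed copy into it.
void pointwise_unary(Tensor& self, Tensor& src, const std::function<float(float)>& f)
{
    if (&self == &src) {
        apply1(self, [&](float& v) { v = f(v); });
    } else {
        resize_as(self, src);
        apply2(self, src, [&](float& out, float in) { out = f(in); });
    }
}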
b3571414920fbf7d8f6c49aaa10d35c68b63ba3a.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathPointwise.cu" #else #include <ATen/MemoryOverlap.h> #include <ATen/NamedTensorUtils.h> void THCTensor_(cbitand)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitand is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitAndOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cbitor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitOrOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cbitxor)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) return THError("cbitor is only supported for integer type tensors"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorBitXorOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cmax)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMaxOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmin)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorMinOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cmaxValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMaxValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cminValue)(THCState *state, THCTensor *self, THCTensor *src, scalar_t value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, src)); if (self == src) { if (!THC_pointwiseApply1<scalar_t>(state, self, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src, TensorMinValueOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #if !defined(THC_REAL_IS_BOOL) static void propagate_names_if_named_tensor_enabled(THCTensor* result, THCTensor* src) { #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(result, src); #endif } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) \ struct Tensor_##NAME##_##REAL##_Op { \ __device__ __forceinline__ void operator()(scalar_t* out, scalar_t* in) const { \ *out = CFUNC(*in); \ } \ \ __device__ __forceinline__ void operator()(scalar_t* v) const { \ *v = CFUNC(*v); \ } \ }; \ \ void THCTensor_(NAME)(THCState* state, THCTensor* self_, THCTensor* src) { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); \ at::assert_no_internal_overlap(self_); \ if (self_ == src) { \ if (!THC_pointwiseApply1<scalar_t>(state, self_, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } else { \ THCTensor_(resizeAs)(state, self_, src); \ \ if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, Tensor_##NAME##_##REAL##_Op())) { \ THArgCheck(false, 2, CUTORCH_DIM_WARNING); \ } \ } \ \ THCudaCheck(cudaGetLastError()); \ propagate_names_if_named_tensor_enabled(self_, src); \ } #define IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(NAME, CFUNC, REAL) \ IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_(NAME, CFUNC, REAL) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log, THCNumerics<scalar_t>::log, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(lgamma, THCNumerics<scalar_t>::lgamma, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log10, THCNumerics<scalar_t>::log10, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(log1p, THCNumerics<scalar_t>::log1p, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( log2, THCNumerics<scalar_t>::log2, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( exp, THCNumerics<scalar_t>::exp, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(expm1, THCNumerics<scalar_t>::expm1, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cos, THCNumerics<scalar_t>::cos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sin, 
THCNumerics<scalar_t>::sin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sqrt, THCNumerics<scalar_t>::sqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(rsqrt, THCNumerics<scalar_t>::rsqrt, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(floor, THCNumerics<scalar_t>::floor, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC(trunc, THCNumerics<scalar_t>::trunc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( acos, THCNumerics<scalar_t>::acos, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cosh, THCNumerics<scalar_t>::cosh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( asin, THCNumerics<scalar_t>::asin, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( sinh, THCNumerics<scalar_t>::sinh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tan, THCNumerics<scalar_t>::tan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( atan, THCNumerics<scalar_t>::atan, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( tanh, THCNumerics<scalar_t>::tanh, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erf, THCNumerics<scalar_t>::erf, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( erfc, THCNumerics<scalar_t>::erfc, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( round, THCNumerics<scalar_t>::round, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( frac, THCNumerics<scalar_t>::frac, Real) IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( cinv, THCNumerics<scalar_t>::cinv, Real) #endif IMPLEMENT_CUDA_TENSOR_BASIC_FUNC( abs, THCNumerics<scalar_t>::abs, Real) #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC_ #undef IMPLEMENT_CUDA_TENSOR_BASIC_FUNC void THCTensor_(clamp)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t min_value, scalar_t max_value) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorClampOp<scalar_t>(min_value, max_value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(crossKernel)(THCState *state, THCTensor *self, THCTensor *x, THCTensor *y, int dimension) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, x, y)); int64_t sx = THCTensor_(stride)(state, x, dimension); int64_t sy = THCTensor_(stride)(state, y, dimension); int64_t so = THCTensor_(stride)(state, self, dimension); THCTensor *nx = THCTensor_(newNarrow)(state, x, dimension, 0, 1); THCTensor *ny = THCTensor_(newNarrow)(state, y, dimension, 0, 1); THCTensor *nself = THCTensor_(newNarrow)(state, self, dimension, 0, 1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, nself, nx, ny, TensorCrossOp<scalar_t>(sx, sy, so))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCTensor_(free)(state, nx); THCTensor_(free)(state, ny); THCTensor_(free)(state, nself); } #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) void THCTensor_(sigmoid)(THCState* state, THCTensor* self_, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorSigmoidOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } void THCTensor_(digamma)(THCState* state, THCTensor* self_, THCTensor* src) { 
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ != src) { THCTensor_(resizeAs)(state, self_, src); } if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } THCudaCheck(cudaGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } void THCTensor_(polygamma)(THCState* state, THCTensor* self_, int64_t n, THCTensor* src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ != src) { THCTensor_(resizeAs)(state, self_, src); } switch (n) { case 0: if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorDigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; case 1: if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTrigammaOp<scalar_t, accreal>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } break; default: THError("polygamma(n,x) is not implemented for n>=2"); } THCudaCheck(cudaGetLastError()); #ifdef BUILD_NAMEDTENSOR at::namedinference::propagate_names(self_, src); #endif } #endif namespace { c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl> retainTensorImpl(THCTensor* self) { c10::raw::intrusive_ptr::incref(self); return c10::intrusive_ptr<at::TensorImpl, at::UndefinedTensorImpl>::reclaim(self); } } void THCTensor_(cadd)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::add_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(csub)(THCState *state, THCTensor *self_, THCTensor* src1, scalar_t value, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); #ifdef THC_REAL_IS_HALF auto alpha = at::Half(value); #else auto alpha = value; #endif at::sub_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2)), alpha); } void THCTensor_(cmul)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::mul_out(out, at::Tensor(retainTensorImpl(src1)), at::Tensor(retainTensorImpl(src2))); } void THCTensor_(cpow)(THCState *state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self = pow(self, src2) if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorCPowOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = pow(src1, src2) if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorCPowOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(pow)(THCState *state, THCTensor *self_, THCTensor *src, scalar_t value) { #if defined(THC_REAL_IS_BYTE) || defined(THC_REAL_IS_CHAR) || defined(THC_REAL_IS_SHORT) || defined(THC_REAL_IS_INT) || defined(THC_REAL_IS_LONG) if (THCNumerics<scalar_t>::lt(value, ScalarConvert<int, scalar_t>::to(0))) { THError("Integers to negative integer powers are not allowed."); } #endif THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, 
scalar_t>::to(1))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorPowOp<scalar_t, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } else { THCTensor_(resizeAs)(state, self_, src); if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(1))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(2))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(3))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, 3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #if defined(THC_REAL_IS_HALF) || defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-1))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -1>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else if (THCNumerics<scalar_t>::eq(value, ScalarConvert<int, scalar_t>::to(-2))) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -2>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } #endif } else { // fallback implementation using pow if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorPowOp<scalar_t, -3>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } THCudaCheck(cudaGetLastError()); } void THCTensor_(tpow)(THCState *state, THCTensor *self_, scalar_t value, THCTensor *src) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src)); if (self_ == src) { if (!THC_pointwiseApply1<scalar_t>(state, self_, TensorTPowOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src); if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src, TensorTPowOp<scalar_t>(value))) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); } void THCTensor_(cdiv)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { auto out = at::Tensor(retainTensorImpl(self_)); at::div_out(out, at::Tensor(retainTensorImpl(src1)), 
at::Tensor(retainTensorImpl(src2))); } void THCTensor_(clshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("clshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorLShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(crshift)(THCState* state, THCTensor *self_, THCTensor *src1, THCTensor *src2) { #if defined(THC_REAL_IS_HALF) return THError("crshift not supported for torch.CudaHalfTensor"); #else THAssert(THCTensor_(checkGPU)(state, 3, self_, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 3, "sizes do not match"); if (self_ == src1) { // self /= src2 if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self_, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self_, src1); // self = src1 / src2 if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self_, src1, src2, TensorRShiftOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } THCudaCheck(cudaGetLastError()); #endif } void THCTensor_(cremainder)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCRemainderOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } void THCTensor_(cfmod)(THCState *state, THCTensor *self, THCTensor *src1, THCTensor *src2) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, self, src1, src2)); THArgCheck(THCTensor_(nElement)(state, src1) == THCTensor_(nElement)(state, src2), 2, "sizes do not match"); if (self == src1) { if (!THC_pointwiseApply2<scalar_t, scalar_t>(state, self, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } else { THCTensor_(resizeAs)(state, self, src1); if (!THC_pointwiseApply3<scalar_t, scalar_t, scalar_t>(state, self, src1, src2, TensorCFmodOp<scalar_t>())) { THArgCheck(false, 2, CUTORCH_DIM_WARNING); } } } #endif #endif
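// A small self-contained illustration of the dispatch pattern used by the pow
// implementation above: the exponent is promoted to a compile-time template
// parameter for the common cases (1, 2, 3, -1, -2) so the per-element functor
// compiles down to multiplications instead of a pow() call, with a generic
// fallback for every other exponent. The functor, kernel, and launcher names here
// are illustrative sketches, not the ones used by THC.
#include <cmath>

template <typename T, int StaticExp>
struct PowOp {
  T val;
  __device__ __forceinline__ T operator()(T x) const {
    if (StaticExp == 1)  return x;
    if (StaticExp == 2)  return x * x;
    if (StaticExp == 3)  return x * x * x;
    if (StaticExp == -1) return T(1) / x;
    if (StaticExp == -2) return T(1) / (x * x);
    return static_cast<T>(powf(static_cast<float>(x), static_cast<float>(val)));
  }
};

template <typename T, int StaticExp>
__global__ void powKernel(T* out, const T* in, int n, PowOp<T, StaticExp> op) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = op(in[i]);
}

// Host-side dispatch: pick the specialization that matches the runtime exponent;
// anything else falls through to the generic (pow-based) functor.
template <typename T>
void launchPow(T* out, const T* in, int n, T value) {
  int threads = 256, blocks = (n + threads - 1) / threads;
  if (value == T(1))      powKernel<<<blocks, threads>>>(out, in, n, PowOp<T, 1>{value});
  else if (value == T(2)) powKernel<<<blocks, threads>>>(out, in, n, PowOp<T, 2>{value});
  else if (value == T(3)) powKernel<<<blocks, threads>>>(out, in, n, PowOp<T, 3>{value});
  else                    powKernel<<<blocks, threads>>>(out, in, n, PowOp<T, -3>{value});
}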
f0e6a76aa871c4baa77d4980668ce825a3e86fad.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This sample queries the properties of the CUDA devices present in the system. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> // includes, project #include <cutil.h> //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { int deviceCount; CUDA_SAFE_CALL(hipGetDeviceCount(&deviceCount)); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { hipDeviceProp_t deviceProp; CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major < 1) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %d bytes\n", deviceProp.totalGlobalMem); printf(" Total amount of constant memory: %d bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %d bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %d bytes\n", deviceProp.memPitch); printf(" Texture alignment: %d bytes\n", deviceProp.textureAlignment); printf(" Clock rate: %d kilohertz\n", deviceProp.clockRate); } printf("\nTest PASSED\n"); CUT_EXIT(argc, argv); }
f0e6a76aa871c4baa77d4980668ce825a3e86fad.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* This sample queries the properties of the CUDA devices present in the system. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> // includes, project #include <cutil.h> //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main( int argc, char** argv) { int deviceCount; CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount)); if (deviceCount == 0) printf("There is no device supporting CUDA\n"); int dev; for (dev = 0; dev < deviceCount; ++dev) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev)); if (dev == 0) { if (deviceProp.major < 1) printf("There is no device supporting CUDA.\n"); else if (deviceCount == 1) printf("There is 1 device supporting CUDA\n"); else printf("There are %d devices supporting CUDA\n", deviceCount); } printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name); printf(" Major revision number: %d\n", deviceProp.major); printf(" Minor revision number: %d\n", deviceProp.minor); printf(" Total amount of global memory: %d bytes\n", deviceProp.totalGlobalMem); printf(" Total amount of constant memory: %d bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %d bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %d bytes\n", deviceProp.memPitch); printf(" Texture alignment: %d bytes\n", deviceProp.textureAlignment); printf(" Clock rate: %d kilohertz\n", deviceProp.clockRate); } printf("\nTest PASSED\n"); CUT_EXIT(argc, argv); }
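// A minimal sketch of the same query written directly against the CUDA runtime API
// (no cutil), assuming a current toolkit where the byte-count fields of
// cudaDeviceProp (totalGlobalMem, sharedMemPerBlock, memPitch, ...) are size_t and
// are therefore printed with %zu. The subset of fields shown is illustrative.
#include <stdio.h>
#include <cuda_runtime.h>

int printDeviceSummary(void)
{
    int deviceCount = 0;
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess || deviceCount == 0) {
        printf("There is no device supporting CUDA\n");
        return 1;
    }
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device %d: \"%s\"\n", dev, prop.name);
        printf("  Compute capability:        %d.%d\n", prop.major, prop.minor);
        printf("  Total global memory:       %zu bytes\n", prop.totalGlobalMem);
        printf("  Shared memory per block:   %zu bytes\n", prop.sharedMemPerBlock);
        printf("  Registers per block:       %d\n", prop.regsPerBlock);
        printf("  Warp size:                 %d\n", prop.warpSize);
        printf("  Max threads per block:     %d\n", prop.maxThreadsPerBlock);
        printf("  Clock rate:                %d kHz\n", prop.clockRate);
    }
    return 0;
}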
949be83910eed9e60eced8e35907d203031ebf1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void computeSquare(int *d_in, int *d_out) { int index = threadIdx.x; d_out[index] = d_in[index] * d_in[index]; }
949be83910eed9e60eced8e35907d203031ebf1e.cu
#include "includes.h" __global__ void computeSquare(int *d_in, int *d_out) { int index = threadIdx.x; d_out[index] = d_in[index] * d_in[index]; }
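// A minimal host-side usage sketch for the computeSquare kernel above. The kernel
// indexes purely by threadIdx.x, so it is launched here as a single block of N
// threads; N and the input values are illustrative. The sketch assumes it is
// compiled together with the kernel definition above (the declaration below only
// mirrors it).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void computeSquare(int *d_in, int *d_out);   // defined above

int squareDemo()
{
    const int N = 8;
    int h_in[N], h_out[N];
    for (int i = 0; i < N; ++i) h_in[i] = i;

    int *d_in = 0, *d_out = 0;
    cudaMalloc(&d_in,  N * sizeof(int));
    cudaMalloc(&d_out, N * sizeof(int));
    cudaMemcpy(d_in, h_in, N * sizeof(int), cudaMemcpyHostToDevice);

    computeSquare<<<1, N>>>(d_in, d_out);               // one block of N threads
    cudaMemcpy(h_out, d_out, N * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; ++i) printf("%d^2 = %d\n", h_in[i], h_out[i]);

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}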
e15f21096a0b4ff18e0bb5351e237d502880e690.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

#define HANDLE_ERROR(ret) \
  { \
    if ((ret) != hipSuccess) \
      std::cerr << "cuda wrong" << std::endl; \
  }

__global__ void add(int* a, int* b, int* c) {
  int idx = threadIdx.x;
  c[idx] = a[idx] + b[idx];
}

int main() {
  int a = 123;
  int b = 234;
  int c;
  int *dev_a, *dev_b, *dev_c;
  HANDLE_ERROR(hipMalloc(&dev_a, sizeof(int)));
  HANDLE_ERROR(hipMalloc(&dev_b, sizeof(int)));
  HANDLE_ERROR(hipMalloc(&dev_c, sizeof(int)));
  HANDLE_ERROR(hipMemcpy(dev_a, &a, sizeof(int), hipMemcpyHostToDevice));
  HANDLE_ERROR(hipMemcpy(dev_b, &b, sizeof(int), hipMemcpyHostToDevice));
  // Only one element per buffer is allocated, so a single thread suffices.
  hipLaunchKernelGGL(add, dim3(1), dim3(1), 0, 0, dev_a, dev_b, dev_c);
  HANDLE_ERROR(hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost));
  hipFree(dev_a);
  hipFree(dev_b);
  hipFree(dev_c);
  std::cout << c << std::endl;
}
e15f21096a0b4ff18e0bb5351e237d502880e690.cu
#include <iostream>

#define HANDLE_ERROR(ret) \
  { \
    if ((ret) != cudaSuccess) \
      std::cerr << "cuda wrong" << std::endl; \
  }

__global__ void add(int* a, int* b, int* c) {
  int idx = threadIdx.x;
  c[idx] = a[idx] + b[idx];
}

int main() {
  int a = 123;
  int b = 234;
  int c;
  int *dev_a, *dev_b, *dev_c;
  HANDLE_ERROR(cudaMalloc(&dev_a, sizeof(int)));
  HANDLE_ERROR(cudaMalloc(&dev_b, sizeof(int)));
  HANDLE_ERROR(cudaMalloc(&dev_c, sizeof(int)));
  HANDLE_ERROR(cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice));
  HANDLE_ERROR(cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice));
  // Only one element per buffer is allocated, so a single thread suffices.
  add<<<1, 1, 0>>>(dev_a, dev_b, dev_c);
  HANDLE_ERROR(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
  cudaFree(dev_a);
  cudaFree(dev_b);
  cudaFree(dev_c);
  std::cout << c << std::endl;
}
24c753feed7ff281cbc269581b0e5e32b9b5183e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Iterated Conditional Modes (ICM) binary image restoration * * Created by Jerod Weinman, 9 June 2008 * Revised 15 August 2012 to include timing * Revised 13 August 2014 to use bitmap library and safely verify user input */ #include "cubitmap.h" #include "bitmap.h" #include <stdio.h> #include <sys/time.h> #include <string.h> #define DEFAULT_ALPHA 2 #define DEFAULT_BETA 1 #define MAX_ITER 20 #define TILE_WIDTH 4 __device__ int alpha_d; __device__ int beta_d; __device__ int rows_d; __device__ int cols_d; __device__ int MAX_ITER_D; /* icmupdate - Calculate one ICM update on an image * * Produces * converged, an int * * Preconditions: * img is the original binary image * restImg is the current restored image result * workImg is a buffer for storing the update * img, restImg, and workImg are all the same size * * Postconditions: * update is stored in workImg * converged==1 indicates update yielded no change from restImg */ /* * Device update function */ __device__ int icmupdate(bit*img_d, bit * restImg_d, bit* workImg_d,int alpha,int beta, int r,int c,int rows,int cols) { int converged = 1; /* Flag indicating whether algorithm has converged */ float cost[2]; /* cost array for both binary states at a pixel */ bit *restBits = restImg_d; bit *workBits = workImg_d; // int alpha = alpha_d; // int beta = beta_d; int index = r*cols + c; /* Pixel linear index */ cost[0] = 0; /* Initialize costs to zero */ cost[1] = 0; /* Local cost: for flipping pixel (r,c). This assigns the local * cost alpha to the opposite state of that at (r,c) */ cost[1-img_d[index]] = alpha; /* Neighborhood cost: Adds beta to the cost for the opposite * value of each neighboring state. Note that the neighboring * state values used are from the most recent iteration of the * restored image. Also, ordered to maximize TLB hits. */ if (r>0) { cost[1-restBits[index-cols]] += beta; /* North */ if (c > 0) cost[1-restBits[index-cols-1]] += beta; /* NorthWest */ if (c < cols-1) cost[1-restBits[index-cols+1]] += beta; /* NorthEast */ } if (c > 0) cost[1-restBits[index-1]] += beta; /* West */ if (c < cols-1) cost[1-restBits[index+1]] += beta; /* East */ if (r < rows-1) { cost[1-restBits[index+cols]] += beta; /* South */ if (c > 0) cost[1-restBits[index+cols-1]] += beta; /* SouthWest */ if (c < cols-1) cost[1-restBits[index+cols+1]] += beta; /* SouthEast */ } /* Assign whichever state has lower cost to the intermediate * "working" restored image */ workBits[index] = (cost[0] > cost[1]); /* If we still think we are converging, check whether the new * value (in workImg) differs from our previous restored * value. 
If they differ, we have not converged */ if (converged && workBits[index]!=restBits[index]) converged = 0; return converged; } /* * Kernel performing the icm Updates for each subset of pixels * img_d is the original image on the device * workImg_d is the work image on the device * restImg_d is the rest image on the device */ __global__ void icmKernel(bit *img_d, bit* restImg_d, bit *workImg_d, int alpha,int beta, int rows, int cols) { /* Identify particular threads */ int r = blockIdx.y * TILE_WIDTH + threadIdx.y; /* Pixel row */ int c = blockIdx.x * TILE_WIDTH + threadIdx.x; /* Pixel col */ int converged =0; int iter; /* Iteration loop counter */ for (iter=0 ; iter<MAX_ITER ; iter++) /* Iterate update/restoration */ { __syncthreads(); converged = icmupdate (img_d,restImg_d, workImg_d,alpha,beta,r,c,rows,cols); /* Update */ __syncthreads(); if( ( r*cols + c) == 0) { printf("%d,%d\n",threadIdx.x,threadIdx.y); memcpy(restImg_d,workImg_d,sizeof(bit)* rows * cols);//copy working bits to restbits } // if (converged) // break; /* Nothing changed, so we are done and can exit the loop */ // else // bmcopy(restImg_h,workImg_h); /* All pixels updated; copy working buffer */ } } /* runicm - Run the ICM algorthm with parameters alpha and beta on an image * * Preconditions: * img is the original binary image * restImg is an already allocated buffer for the restored image result * img and restImg are the same size * * Postconditions: * ICM is run on img until convergence or an iteration limit is reached * Result is stored in restImg buffer */ __host__ void runicm(const bm_t *img_h, bm_t *restImg_h, double alpha, double beta) { bm_t *workImg_h; /* An image buffer for storing intermediate results */ int converged; /* Convergence flag for testing */ int iter = MAX_ITER; /* Iteration loop counter */ int rows = img_h->rows; int cols = img_h->cols; workImg_h = (bm_t*) malloc (sizeof(bm_t)); /* Allocate bitmap struct */ if (workImg_h==NULL) { fprintf(stderr,"Unable to allocate work image"); exit(EXIT_FAILURE); } if ( bmalloc(workImg_h, img_h->rows, img_h->cols) < 0 ) /* Allocate work image */ exit(EXIT_FAILURE); if ( bmcopy(restImg_h, img_h) < 0 ) /* Copy original into restored */ exit(EXIT_FAILURE); /* Allocate space on device */ bit* img_d = cu_bmalloc(rows, cols); bit* restImg_d = cu_bmalloc(rows, cols); bit* workImg_d = cu_bmalloc(rows, cols); /* check if memory allocation was successful*/ if (img_d==NULL || restImg_d==NULL || workImg_d==NULL) { fprintf(stderr,"Error allocating device image memory\n"); exit(EXIT_FAILURE); } /* Copy image data to the device */ if ( hd_bmcopy(img_d, img_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying original image to device\n"); exit(EXIT_FAILURE); } if ( hd_bmcopy(restImg_d, restImg_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying rest image to device\n"); exit(EXIT_FAILURE); } if ( hd_bmcopy(workImg_d, workImg_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying work image to device\n"); exit(EXIT_FAILURE); } /* Allocate global constants on device*/ hipError_t rc; // Copy value of alpha on host into alpha_d on device rc = hipMemcpyToSymbol( alpha_d, &alpha, sizeof(int), 0, hipMemcpyHostToDevice); if (rc != hipSuccess) fprintf(stderr,"Unable to copy alpha value to device: %s", hipGetErrorString(rc) ); // Copy value of alpha on host into alpha_d on device rc = hipMemcpyToSymbol( beta_d, &beta, sizeof(int), 0, hipMemcpyHostToDevice); if (rc != hipSuccess) fprintf(stderr,"Unable to copy beta value to device: %s", 
hipGetErrorString(rc) ); // Copy value of MAX_ITER on host into MAX_ITER_d on device rc = hipMemcpyToSymbol( MAX_ITER_D, &iter, sizeof(int), 0, hipMemcpyHostToDevice); if (rc != hipSuccess) fprintf(stderr,"Unable to copy beta value to device: %s", hipGetErrorString(rc) ); /* Set up execution configuration */ dim3 dimGrid( cols / TILE_WIDTH + 1, rows / TILE_WIDTH + 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH ); /* Invoke Kernel*/ hipLaunchKernelGGL(( icmKernel), dim3(dimGrid),dim3(dimBlock), 0, 0, img_d,restImg_d,workImg_d,alpha,beta,rows,cols); /* * Copy restored image from device to host */ if (dh_bmcopy(restImg_h, restImg_d)<0) /* Copy inverted image to host */ { fprintf(stderr,"Error copying restImg_d to host"); exit(EXIT_FAILURE); } /* free device memory */ if (hipFree( img_d ) != hipSuccess || /* Free device memory */ hipFree( restImg_d ) != hipSuccess || hipFree( workImg_d ) != hipSuccess ) { fprintf(stderr,"Error releasing device memory\n"); } // for (iter=0 ; iter<MAX_ITER ; iter++) /* Iterate update/restoration */ // { // converged = icmupdate (img_h, restImg_h, workImg_h, alpha, beta); /* Update */ // if (converged) // break; /* Nothing changed, so we are done and can exit the loop */ // else // bmcopy(restImg_h,workImg_h); /* All pixels updated; copy working buffer */ // } bmfree(workImg_h); /* Free our temporary work image buffer */ free(workImg_h); } /* Run the ICM algorithm, write the result, and print the run time */ __host__ void run_time_icm(const char* origFile, const char* cleanFile, double alpha, double beta) { int result; /* Return value of gettimeofday for testing */ struct timeval start,end, diff; /* Clock and interval measurements */ bm_t origImg, cleanImg; /* Input and output images */ if (bmread(origFile, &origImg)<0) /* Read input image */ exit (EXIT_FAILURE); if (bmalloc(&cleanImg, origImg.rows, origImg.cols) < 0) /* Allocate cleaned */ exit (EXIT_FAILURE); if ( gettimeofday(&start, NULL) ) /* Get start time */ { perror("Could not get start time"); exit (EXIT_FAILURE); } runicm(&origImg, &cleanImg, alpha, beta); /* Run restoration algorithm */ if( gettimeofday(&end, NULL) ) /* Get end time */ { perror("Could not get end time"); exit (EXIT_FAILURE); } timersub(&end, &start, &diff); /* Calculate time interval */ printf("%u.%06u\n",diff.tv_sec,diff.tv_usec); /* Print time (sec) */ if (bmwrite(cleanFile, &cleanImg)<0) /* Write restored image */ exit (EXIT_FAILURE); bmfree(&cleanImg); /* Free our allocated image */ } /* String to float conversion. * Preconditions: * All of str is the float * str is a null-terminated character array * cmd is a null-terminated character array * Postconditions: Prints an error message of the form * "cmd: name str must be a number" and exits the program with a failure when * the first precondition is violated. Otherwise returns the parsed number. */ __host__ float estrtof(char* str, char* cmd, const char* name ) { char* endPtr; /* First unparseable character (for verification) */ float num = strtof(str, &endPtr); if ( (endPtr-str) != strlen(str) ) /* Verify entire string was parsed */ { fprintf(stderr,"%s: %s %s must be a number\n",cmd,name,str); exit (EXIT_FAILURE); } return num; } /* Verify and process command line arguments. 
* Prints an error message and exits if any prove immediately problematic */ __host__ void processArguments(int argc, char* argv[], char** origFile, char** cleanFile, float *alpha, float *beta) { if (argc<3 || argc>5) /* Verify optional argument count */ { fprintf(stderr,"Usage: %s input output [alpha] [beta]\n", argv[0]); exit (EXIT_FAILURE); } *origFile = argv[1]; *cleanFile = argv[2]; if (argc>3) /* Safely process optional alpha arguments */ *alpha = estrtof(argv[3],argv[0],"alpha"); else *alpha = DEFAULT_ALPHA; /* No option given, take the default */ if (argc>4) /* Safely process optional alpha arguments */ *beta = estrtof(argv[4],argv[0],"beta"); else *beta = DEFAULT_BETA; /* No option given, take the default */ } /* Main program: Process user input and calculate ICM result * Usage: ./icm input output [alpha] [beta] */ int main(int argc, char* argv[]) { char *origFile, *cleanFile; /* Input and output file names */ float alpha,beta; /* ICM algorithm parameters */ processArguments(argc,argv,&origFile,&cleanFile,&alpha,&beta); run_time_icm(origFile, cleanFile, alpha, beta); /* Time and write result */ exit (EXIT_SUCCESS); /* Exit cleanly */ }
24c753feed7ff281cbc269581b0e5e32b9b5183e.cu
/* Iterated Conditional Modes (ICM) binary image restoration * * Created by Jerod Weinman, 9 June 2008 * Revised 15 August 2012 to include timing * Revised 13 August 2014 to use bitmap library and safely verify user input */ #include "cubitmap.h" #include "bitmap.h" #include <stdio.h> #include <sys/time.h> #include <string.h> #define DEFAULT_ALPHA 2 #define DEFAULT_BETA 1 #define MAX_ITER 20 #define TILE_WIDTH 4 __device__ int alpha_d; __device__ int beta_d; __device__ int rows_d; __device__ int cols_d; __device__ int MAX_ITER_D; /* icmupdate - Calculate one ICM update on an image * * Produces * converged, an int * * Preconditions: * img is the original binary image * restImg is the current restored image result * workImg is a buffer for storing the update * img, restImg, and workImg are all the same size * * Postconditions: * update is stored in workImg * converged==1 indicates update yielded no change from restImg */ /* * Device update function */ __device__ int icmupdate(bit*img_d, bit * restImg_d, bit* workImg_d,int alpha,int beta, int r,int c,int rows,int cols) { int converged = 1; /* Flag indicating whether algorithm has converged */ float cost[2]; /* cost array for both binary states at a pixel */ bit *restBits = restImg_d; bit *workBits = workImg_d; // int alpha = alpha_d; // int beta = beta_d; int index = r*cols + c; /* Pixel linear index */ cost[0] = 0; /* Initialize costs to zero */ cost[1] = 0; /* Local cost: for flipping pixel (r,c). This assigns the local * cost alpha to the opposite state of that at (r,c) */ cost[1-img_d[index]] = alpha; /* Neighborhood cost: Adds beta to the cost for the opposite * value of each neighboring state. Note that the neighboring * state values used are from the most recent iteration of the * restored image. Also, ordered to maximize TLB hits. */ if (r>0) { cost[1-restBits[index-cols]] += beta; /* North */ if (c > 0) cost[1-restBits[index-cols-1]] += beta; /* NorthWest */ if (c < cols-1) cost[1-restBits[index-cols+1]] += beta; /* NorthEast */ } if (c > 0) cost[1-restBits[index-1]] += beta; /* West */ if (c < cols-1) cost[1-restBits[index+1]] += beta; /* East */ if (r < rows-1) { cost[1-restBits[index+cols]] += beta; /* South */ if (c > 0) cost[1-restBits[index+cols-1]] += beta; /* SouthWest */ if (c < cols-1) cost[1-restBits[index+cols+1]] += beta; /* SouthEast */ } /* Assign whichever state has lower cost to the intermediate * "working" restored image */ workBits[index] = (cost[0] > cost[1]); /* If we still think we are converging, check whether the new * value (in workImg) differs from our previous restored * value. 
If they differ, we have not converged */ if (converged && workBits[index]!=restBits[index]) converged = 0; return converged; } /* * Kernel performing the icm Updates for each subset of pixels * img_d is the original image on the device * workImg_d is the work image on the device * restImg_d is the rest image on the device */ __global__ void icmKernel(bit *img_d, bit* restImg_d, bit *workImg_d, int alpha,int beta, int rows, int cols) { /* Identify particular threads */ int r = blockIdx.y * TILE_WIDTH + threadIdx.y; /* Pixel row */ int c = blockIdx.x * TILE_WIDTH + threadIdx.x; /* Pixel col */ int converged =0; int iter; /* Iteration loop counter */ for (iter=0 ; iter<MAX_ITER ; iter++) /* Iterate update/restoration */ { __syncthreads(); converged = icmupdate (img_d,restImg_d, workImg_d,alpha,beta,r,c,rows,cols); /* Update */ __syncthreads(); if( ( r*cols + c) == 0) { printf("%d,%d\n",threadIdx.x,threadIdx.y); memcpy(restImg_d,workImg_d,sizeof(bit)* rows * cols);//copy working bits to restbits } // if (converged) // break; /* Nothing changed, so we are done and can exit the loop */ // else // bmcopy(restImg_h,workImg_h); /* All pixels updated; copy working buffer */ } } /* runicm - Run the ICM algorthm with parameters alpha and beta on an image * * Preconditions: * img is the original binary image * restImg is an already allocated buffer for the restored image result * img and restImg are the same size * * Postconditions: * ICM is run on img until convergence or an iteration limit is reached * Result is stored in restImg buffer */ __host__ void runicm(const bm_t *img_h, bm_t *restImg_h, double alpha, double beta) { bm_t *workImg_h; /* An image buffer for storing intermediate results */ int converged; /* Convergence flag for testing */ int iter = MAX_ITER; /* Iteration loop counter */ int rows = img_h->rows; int cols = img_h->cols; workImg_h = (bm_t*) malloc (sizeof(bm_t)); /* Allocate bitmap struct */ if (workImg_h==NULL) { fprintf(stderr,"Unable to allocate work image"); exit(EXIT_FAILURE); } if ( bmalloc(workImg_h, img_h->rows, img_h->cols) < 0 ) /* Allocate work image */ exit(EXIT_FAILURE); if ( bmcopy(restImg_h, img_h) < 0 ) /* Copy original into restored */ exit(EXIT_FAILURE); /* Allocate space on device */ bit* img_d = cu_bmalloc(rows, cols); bit* restImg_d = cu_bmalloc(rows, cols); bit* workImg_d = cu_bmalloc(rows, cols); /* check if memory allocation was successful*/ if (img_d==NULL || restImg_d==NULL || workImg_d==NULL) { fprintf(stderr,"Error allocating device image memory\n"); exit(EXIT_FAILURE); } /* Copy image data to the device */ if ( hd_bmcopy(img_d, img_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying original image to device\n"); exit(EXIT_FAILURE); } if ( hd_bmcopy(restImg_d, restImg_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying rest image to device\n"); exit(EXIT_FAILURE); } if ( hd_bmcopy(workImg_d, workImg_h) < 0 ) /* Copy image to device */ { fprintf(stderr,"Error copying work image to device\n"); exit(EXIT_FAILURE); } /* Allocate global constants on device*/ cudaError_t rc; // Copy value of alpha on host into alpha_d on device rc = cudaMemcpyToSymbol( alpha_d, &alpha, sizeof(int), 0, cudaMemcpyHostToDevice); if (rc != cudaSuccess) fprintf(stderr,"Unable to copy alpha value to device: %s", cudaGetErrorString(rc) ); // Copy value of alpha on host into alpha_d on device rc = cudaMemcpyToSymbol( beta_d, &beta, sizeof(int), 0, cudaMemcpyHostToDevice); if (rc != cudaSuccess) fprintf(stderr,"Unable to copy beta value to device: %s", 
cudaGetErrorString(rc) ); // Copy value of MAX_ITER on host into MAX_ITER_d on device rc = cudaMemcpyToSymbol( MAX_ITER_D, &iter, sizeof(int), 0, cudaMemcpyHostToDevice); if (rc != cudaSuccess) fprintf(stderr,"Unable to copy beta value to device: %s", cudaGetErrorString(rc) ); /* Set up execution configuration */ dim3 dimGrid( cols / TILE_WIDTH + 1, rows / TILE_WIDTH + 1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH ); /* Invoke Kernel*/ icmKernel<<<dimGrid,dimBlock>>>(img_d,restImg_d,workImg_d,alpha,beta,rows,cols); /* * Copy restored image from device to host */ if (dh_bmcopy(restImg_h, restImg_d)<0) /* Copy inverted image to host */ { fprintf(stderr,"Error copying restImg_d to host"); exit(EXIT_FAILURE); } /* free device memory */ if (cudaFree( img_d ) != cudaSuccess || /* Free device memory */ cudaFree( restImg_d ) != cudaSuccess || cudaFree( workImg_d ) != cudaSuccess ) { fprintf(stderr,"Error releasing device memory\n"); } // for (iter=0 ; iter<MAX_ITER ; iter++) /* Iterate update/restoration */ // { // converged = icmupdate (img_h, restImg_h, workImg_h, alpha, beta); /* Update */ // if (converged) // break; /* Nothing changed, so we are done and can exit the loop */ // else // bmcopy(restImg_h,workImg_h); /* All pixels updated; copy working buffer */ // } bmfree(workImg_h); /* Free our temporary work image buffer */ free(workImg_h); } /* Run the ICM algorithm, write the result, and print the run time */ __host__ void run_time_icm(const char* origFile, const char* cleanFile, double alpha, double beta) { int result; /* Return value of gettimeofday for testing */ struct timeval start,end, diff; /* Clock and interval measurements */ bm_t origImg, cleanImg; /* Input and output images */ if (bmread(origFile, &origImg)<0) /* Read input image */ exit (EXIT_FAILURE); if (bmalloc(&cleanImg, origImg.rows, origImg.cols) < 0) /* Allocate cleaned */ exit (EXIT_FAILURE); if ( gettimeofday(&start, NULL) ) /* Get start time */ { perror("Could not get start time"); exit (EXIT_FAILURE); } runicm(&origImg, &cleanImg, alpha, beta); /* Run restoration algorithm */ if( gettimeofday(&end, NULL) ) /* Get end time */ { perror("Could not get end time"); exit (EXIT_FAILURE); } timersub(&end, &start, &diff); /* Calculate time interval */ printf("%u.%06u\n",diff.tv_sec,diff.tv_usec); /* Print time (sec) */ if (bmwrite(cleanFile, &cleanImg)<0) /* Write restored image */ exit (EXIT_FAILURE); bmfree(&cleanImg); /* Free our allocated image */ } /* String to float conversion. * Preconditions: * All of str is the float * str is a null-terminated character array * cmd is a null-terminated character array * Postconditions: Prints an error message of the form * "cmd: name str must be a number" and exits the program with a failure when * the first precondition is violated. Otherwise returns the parsed number. */ __host__ float estrtof(char* str, char* cmd, const char* name ) { char* endPtr; /* First unparseable character (for verification) */ float num = strtof(str, &endPtr); if ( (endPtr-str) != strlen(str) ) /* Verify entire string was parsed */ { fprintf(stderr,"%s: %s %s must be a number\n",cmd,name,str); exit (EXIT_FAILURE); } return num; } /* Verify and process command line arguments. 
* Prints an error message and exits if any prove immediately problematic */ __host__ void processArguments(int argc, char* argv[], char** origFile, char** cleanFile, float *alpha, float *beta) { if (argc<3 || argc>5) /* Verify optional argument count */ { fprintf(stderr,"Usage: %s input output [alpha] [beta]\n", argv[0]); exit (EXIT_FAILURE); } *origFile = argv[1]; *cleanFile = argv[2]; if (argc>3) /* Safely process optional alpha arguments */ *alpha = estrtof(argv[3],argv[0],"alpha"); else *alpha = DEFAULT_ALPHA; /* No option given, take the default */ if (argc>4) /* Safely process optional alpha arguments */ *beta = estrtof(argv[4],argv[0],"beta"); else *beta = DEFAULT_BETA; /* No option given, take the default */ } /* Main program: Process user input and calculate ICM result * Usage: ./icm input output [alpha] [beta] */ int main(int argc, char* argv[]) { char *origFile, *cleanFile; /* Input and output file names */ float alpha,beta; /* ICM algorithm parameters */ processArguments(argc,argv,&origFile,&cleanFile,&alpha,&beta); run_time_icm(origFile, cleanFile, alpha, beta); /* Time and write result */ exit (EXIT_SUCCESS); /* Exit cleanly */ }
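// A hedged sketch of an alternative driver for the ICM kernel above. __syncthreads()
// only synchronizes the threads of one block, so the buffer copy performed by the
// single thread with (r*cols + c) == 0 is not ordered with respect to the other
// blocks. One conventional restructuring keeps the per-pixel update on the device
// but moves the iteration loop to the host, using kernel-launch boundaries as the
// grid-wide barrier. icmStepKernel and runicm_host_loop are illustrative names, not
// part of the original file; they reuse bit, TILE_WIDTH, MAX_ITER, and icmupdate as
// defined above.
__global__ void icmStepKernel(bit *img_d, bit *restImg_d, bit *workImg_d,
                              int alpha, int beta, int rows, int cols)
{
  int r = blockIdx.y * TILE_WIDTH + threadIdx.y;
  int c = blockIdx.x * TILE_WIDTH + threadIdx.x;
  if (r < rows && c < cols)                      /* guard the partial edge tiles */
    icmupdate(img_d, restImg_d, workImg_d, alpha, beta, r, c, rows, cols);
}

__host__ void runicm_host_loop(bit *img_d, bit *restImg_d, bit *workImg_d,
                               int alpha, int beta, int rows, int cols)
{
  dim3 dimGrid(cols / TILE_WIDTH + 1, rows / TILE_WIDTH + 1);
  dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

  for (int iter = 0; iter < MAX_ITER; iter++) {
    icmStepKernel<<<dimGrid, dimBlock>>>(img_d, restImg_d, workImg_d,
                                         alpha, beta, rows, cols);
    cudaDeviceSynchronize();                     /* grid-wide barrier between sweeps */
    cudaMemcpy(restImg_d, workImg_d, (size_t)rows * cols * sizeof(bit),
               cudaMemcpyDeviceToDevice);        /* promote the work buffer */
  }
}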
3a3d226304b2e9a3eaa2ff3097a7351f53d649bc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime_api.h"
#include <vector>

namespace CudaHelpers {

// Allocates device memory for the vector's contents and copies them over.
// The device pointer is taken by reference so the caller receives the allocation.
// Returns true on success.
template<typename T>
bool copy_vector_to_gpu(T*& gpu_mem, const std::vector<T>& vec) {
    hipError_t err = hipMalloc((void**)&gpu_mem, vec.size() * sizeof(T));
    if (err != hipSuccess)
        return false;
    err = hipMemcpy((void*)gpu_mem, (const void*)vec.data(),
                    vec.size() * sizeof(T), hipMemcpyHostToDevice);
    return err == hipSuccess;
}

// Copies vec.size() elements back from the device into the host vector.
// Returns true on success.
template<typename T>
bool retrieve_vector_from_gpu(T* gpu_mem, std::vector<T>& vec) {
    return hipMemcpy((void*)vec.data(), gpu_mem,
                     vec.size() * sizeof(T), hipMemcpyDeviceToHost) == hipSuccess;
}

}
3a3d226304b2e9a3eaa2ff3097a7351f53d649bc.cu
#include "cuda_runtime_api.h"
#include <vector>

namespace CudaHelpers {

// Allocates device memory for the vector's contents and copies them over.
// The device pointer is taken by reference so the caller receives the allocation.
// Returns true on success.
template<typename T>
bool copy_vector_to_gpu(T*& gpu_mem, const std::vector<T>& vec) {
    cudaError_t err = cudaMalloc((void**)&gpu_mem, vec.size() * sizeof(T));
    if (err != cudaSuccess)
        return false;
    err = cudaMemcpy((void*)gpu_mem, (const void*)vec.data(),
                     vec.size() * sizeof(T), cudaMemcpyHostToDevice);
    return err == cudaSuccess;
}

// Copies vec.size() elements back from the device into the host vector.
// Returns true on success.
template<typename T>
bool retrieve_vector_from_gpu(T* gpu_mem, std::vector<T>& vec) {
    return cudaMemcpy((void*)vec.data(), gpu_mem,
                      vec.size() * sizeof(T), cudaMemcpyDeviceToHost) == cudaSuccess;
}

}
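// A brief usage sketch for the helpers above, assuming the by-reference signature
// shown there (the caller receives the device allocation, and true means success).
// The element type and values are illustrative.
#include <cstdio>
#include <vector>
#include <cuda_runtime_api.h>

int helpersDemo()
{
    std::vector<float> host = {1.0f, 2.0f, 3.0f};
    float *dev = 0;

    if (!CudaHelpers::copy_vector_to_gpu(dev, host))
        return 1;                                  // allocation or H2D copy failed

    // ... kernels that read or write `dev` would run here ...

    std::vector<float> back(host.size());
    if (!CudaHelpers::retrieve_vector_from_gpu(dev, back))
        return 1;                                  // D2H copy failed

    printf("round trip: %f %f %f\n", back[0], back[1], back[2]);
    cudaFree(dev);
    return 0;
}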
519519f5c26f2b30deb95cd43c3df3b9a3cdf9c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <cstdio> #include <cmath> #include <fstream> #include <vector> #include <iostream> #include <cassert> #include <algorithm> #include <ctime> #if defined __linux__ || defined __APPLE__ // "Compiled for Linux #else // Windows doesn't define these values by default, Linux does #define M_PI 3.141592653589793 #endif #ifdef __HIPCC__ #define CUDA_CALLABLE_MEMBER __host__ __device__ #else #define CUDA_CALLABLE_MEMBER #endif template<typename T> class Vec3 { public: T x, y, z; CUDA_CALLABLE_MEMBER Vec3() : x(T(0)), y(T(0)), z(T(0)) {} CUDA_CALLABLE_MEMBER Vec3(T xx) : x(xx), y(xx), z(xx) {} CUDA_CALLABLE_MEMBER Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {} CUDA_CALLABLE_MEMBER Vec3& normalize() { T nor2 = length2(); if (nor2 > 0) { T invNor = 1 / sqrt(nor2); x *= invNor, y *= invNor, z *= invNor; } return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); } CUDA_CALLABLE_MEMBER T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; } CUDA_CALLABLE_MEMBER Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); } CUDA_CALLABLE_MEMBER Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); } CUDA_CALLABLE_MEMBER Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); } CUDA_CALLABLE_MEMBER T length2() const { return x * x + y * y + z * z; } CUDA_CALLABLE_MEMBER T length() const { return sqrt(length2()); } friend std::ostream & operator << (std::ostream &os, const Vec3<T> &v) { os << "[" << v.x << " " << v.y << " " << v.z << "]"; return os; } }; typedef Vec3<float> Vec3f; class Sphere { public: Vec3f center; /// position of the sphere float radius, radius2; /// sphere radius and radius^2 Vec3f surfaceColor, emissionColor; /// surface color and emission (light) float transparency, reflection; /// surface transparency and reflectivity Sphere(){} Sphere( const Vec3f &c, const float &r, const Vec3f &sc, const float &refl = 0, const float &transp = 0, const Vec3f &ec = 0) : center(c), radius(r), radius2(r * r), surfaceColor(sc), emissionColor(ec), transparency(transp), reflection(refl) { /* empty */ } // Compute a ray-sphere intersection using the geometric solution CUDA_CALLABLE_MEMBER bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const { Vec3f l = center - rayorig; float tca = l.dot(raydir); if (tca < 0) return false; float d2 = l.dot(l) - tca * tca; if (d2 > radius2) return false; float thc = sqrt(radius2 - d2); t0 = tca - thc; t1 = tca + thc; return true; } }; // This variable controls the maximum recursion depth #define MAX_RAY_DEPTH 5 CUDA_CALLABLE_MEMBER float mix(const float &a, const float &b, const float &mix) { return b * mix + a * (1 - mix); } // This is the main trace function. It takes a ray as argument (defined by its origin // and direction). We test if this ray intersects any of the geometry in the scene. // If the ray intersects an object, we compute the intersection point, the normal // at the intersection point, and shade this point using this information. 
// Shading depends on the surface property (is it transparent, reflective, diffuse). // The function returns a color for the ray. If the ray intersects an object that // is the color of the object at the intersection point, otherwise it returns // the background color. CUDA_CALLABLE_MEMBER Vec3f trace( const Vec3f &rayorig, const Vec3f &raydir, Sphere *spheres, int spheresSize, const int &depth) { //if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl; float tnear = INFINITY; const Sphere* sphere = NULL; // find intersection of this ray with the sphere in the scene for (int i = 0; i < spheresSize; ++i) { float t0 = INFINITY, t1 = INFINITY; if (spheres[i].intersect(rayorig, raydir, t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; sphere = &spheres[i]; } } } // if there's no intersection return black or background color if (!sphere) return Vec3f(2); Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray Vec3f phit = rayorig + raydir * tnear; // point of intersection Vec3f nhit = phit - sphere->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. float bias = 1e-4; // add some bias to the point from which we will be tracing bool inside = false; if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true; // it's a diffuse object, no need to raytrace any further for (int i = 0; i < spheresSize; ++i) { if (spheres[i].emissionColor.x > 0) { // this is a light Vec3f transmission = 1; Vec3f lightDirection = spheres[i].center - phit; lightDirection.normalize(); for (int j = 0; j < spheresSize; ++j) { if (i != j) { float t0, t1; if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) { transmission = 0; break; } } } surfaceColor += sphere->surfaceColor * transmission * max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor; } } return surfaceColor + sphere->emissionColor; } __global__ void fill_image(Sphere *spheres, int spheresSize, Vec3f *image, int width, int height) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30; float angle = tan(M_PI * 0.5 * fov / 180.); float aspectratio = width / float(height); for (int i = index; i < width * height; i += stride) { int y = i / width; int x = i - y * width; float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); image[i] = trace(Vec3f(0), raydir, spheres, spheresSize, 0); } } // Main rendering function. We compute a camera ray for each pixel of the image // trace it and return a color. If the ray hits a sphere, we return the color of the // sphere at the intersection point, else we return the background color. 
void render(Sphere *spheres, int spheresSize) { unsigned width = 640, height = 480; int blockSize = 256; int numBlocks = (width * height + blockSize - 1) / blockSize; Vec3f *image; hipMallocManaged(&image, height * width * sizeof(Vec3f)); // Trace rays clock_t begin = clock(); hipLaunchKernelGGL(( fill_image), dim3(numBlocks), dim3(blockSize), 0, 0, spheres, spheresSize, image, width, height); hipDeviceSynchronize(); clock_t end = clock(); printf("Elapsed time: %lf\n", double(end - begin) / CLOCKS_PER_SEC); // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i < width * height; ++i) { ofs << (unsigned char)(::min(float(1), image[i].x) * 255) << (unsigned char)(::min(float(1), image[i].y) * 255) << (unsigned char)(::min(float(1), image[i].z) * 255); } ofs.close(); hipFree(image); } // In the main function, we will create the scene which is composed of 5 spheres // and 1 light (which is also a sphere). Then, once the scene description is complete // we render that scene, by calling the render() function. int main(int argc, char **argv) { srand(13); int spheresSize = 7; Sphere *spheres; hipMallocManaged(&spheres, spheresSize * sizeof(Sphere)); // position, radius, surface color, reflectivity, transparency, emission color spheres[0] = Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0); spheres[1] = Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5); spheres[2] = Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 0.76, 0.46), 1, 0.0); spheres[3] = Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0); spheres[4] = Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0); spheres[5] = Sphere(Vec3f(3, 5, -30), 1, Vec3f(0.3, 0.50, 0.20), 1, 0.0); // light spheres[6] = Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3)); render(spheres, spheresSize); hipFree(spheres); return 0; }
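Note on the hipified raytracer above: its hipMallocManaged, hipDeviceSynchronize and hipFree calls never check return codes. A minimal, self-contained sketch of how such calls could be guarded; the HIP_CHECK macro name and the buffer size are illustrative assumptions, not part of the original file:

#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>

// Hypothetical helper, not from the original file: abort with the HIP
// error string as soon as any runtime call fails.
#define HIP_CHECK(call)                                                     \
  do {                                                                      \
    hipError_t err_ = (call);                                               \
    if (err_ != hipSuccess) {                                               \
      fprintf(stderr, "HIP error '%s' at %s:%d\n",                          \
              hipGetErrorString(err_), __FILE__, __LINE__);                 \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

int main() {
  float* buffer = nullptr;
  // Guarded versions of the unchecked allocation/synchronization pattern
  // used in render(); the 640*480 size only mirrors the image resolution.
  HIP_CHECK(hipMallocManaged(&buffer, 640 * 480 * sizeof(float)));
  HIP_CHECK(hipDeviceSynchronize());
  HIP_CHECK(hipFree(buffer));
  return 0;
}

Compiled with hipcc, this sketch aborts with the HIP error string on the first failing runtime call instead of silently continuing.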
519519f5c26f2b30deb95cd43c3df3b9a3cdf9c4.cu
#include <cstdlib> #include <cstdio> #include <cmath> #include <fstream> #include <vector> #include <iostream> #include <cassert> #include <algorithm> #include <ctime> #if defined __linux__ || defined __APPLE__ // "Compiled for Linux #else // Windows doesn't define these values by default, Linux does #define M_PI 3.141592653589793 #endif #ifdef __CUDACC__ #define CUDA_CALLABLE_MEMBER __host__ __device__ #else #define CUDA_CALLABLE_MEMBER #endif template<typename T> class Vec3 { public: T x, y, z; CUDA_CALLABLE_MEMBER Vec3() : x(T(0)), y(T(0)), z(T(0)) {} CUDA_CALLABLE_MEMBER Vec3(T xx) : x(xx), y(xx), z(xx) {} CUDA_CALLABLE_MEMBER Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {} CUDA_CALLABLE_MEMBER Vec3& normalize() { T nor2 = length2(); if (nor2 > 0) { T invNor = 1 / sqrt(nor2); x *= invNor, y *= invNor, z *= invNor; } return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); } CUDA_CALLABLE_MEMBER T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; } CUDA_CALLABLE_MEMBER Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); } CUDA_CALLABLE_MEMBER Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); } CUDA_CALLABLE_MEMBER Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); } CUDA_CALLABLE_MEMBER T length2() const { return x * x + y * y + z * z; } CUDA_CALLABLE_MEMBER T length() const { return sqrt(length2()); } friend std::ostream & operator << (std::ostream &os, const Vec3<T> &v) { os << "[" << v.x << " " << v.y << " " << v.z << "]"; return os; } }; typedef Vec3<float> Vec3f; class Sphere { public: Vec3f center; /// position of the sphere float radius, radius2; /// sphere radius and radius^2 Vec3f surfaceColor, emissionColor; /// surface color and emission (light) float transparency, reflection; /// surface transparency and reflectivity Sphere(){} Sphere( const Vec3f &c, const float &r, const Vec3f &sc, const float &refl = 0, const float &transp = 0, const Vec3f &ec = 0) : center(c), radius(r), radius2(r * r), surfaceColor(sc), emissionColor(ec), transparency(transp), reflection(refl) { /* empty */ } // Compute a ray-sphere intersection using the geometric solution CUDA_CALLABLE_MEMBER bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const { Vec3f l = center - rayorig; float tca = l.dot(raydir); if (tca < 0) return false; float d2 = l.dot(l) - tca * tca; if (d2 > radius2) return false; float thc = sqrt(radius2 - d2); t0 = tca - thc; t1 = tca + thc; return true; } }; // This variable controls the maximum recursion depth #define MAX_RAY_DEPTH 5 CUDA_CALLABLE_MEMBER float mix(const float &a, const float &b, const float &mix) { return b * mix + a * (1 - mix); } // This is the main trace function. It takes a ray as argument (defined by its origin // and direction). We test if this ray intersects any of the geometry in the scene. // If the ray intersects an object, we compute the intersection point, the normal // at the intersection point, and shade this point using this information. // Shading depends on the surface property (is it transparent, reflective, diffuse). 
// The function returns a color for the ray. If the ray intersects an object that // is the color of the object at the intersection point, otherwise it returns // the background color. CUDA_CALLABLE_MEMBER Vec3f trace( const Vec3f &rayorig, const Vec3f &raydir, Sphere *spheres, int spheresSize, const int &depth) { //if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl; float tnear = INFINITY; const Sphere* sphere = NULL; // find intersection of this ray with the sphere in the scene for (int i = 0; i < spheresSize; ++i) { float t0 = INFINITY, t1 = INFINITY; if (spheres[i].intersect(rayorig, raydir, t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; sphere = &spheres[i]; } } } // if there's no intersection return black or background color if (!sphere) return Vec3f(2); Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray Vec3f phit = rayorig + raydir * tnear; // point of intersection Vec3f nhit = phit - sphere->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. float bias = 1e-4; // add some bias to the point from which we will be tracing bool inside = false; if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true; // it's a diffuse object, no need to raytrace any further for (int i = 0; i < spheresSize; ++i) { if (spheres[i].emissionColor.x > 0) { // this is a light Vec3f transmission = 1; Vec3f lightDirection = spheres[i].center - phit; lightDirection.normalize(); for (int j = 0; j < spheresSize; ++j) { if (i != j) { float t0, t1; if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) { transmission = 0; break; } } } surfaceColor += sphere->surfaceColor * transmission * max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor; } } return surfaceColor + sphere->emissionColor; } __global__ void fill_image(Sphere *spheres, int spheresSize, Vec3f *image, int width, int height) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30; float angle = tan(M_PI * 0.5 * fov / 180.); float aspectratio = width / float(height); for (int i = index; i < width * height; i += stride) { int y = i / width; int x = i - y * width; float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); image[i] = trace(Vec3f(0), raydir, spheres, spheresSize, 0); } } // Main rendering function. We compute a camera ray for each pixel of the image // trace it and return a color. If the ray hits a sphere, we return the color of the // sphere at the intersection point, else we return the background color. 
void render(Sphere *spheres, int spheresSize) { unsigned width = 640, height = 480; int blockSize = 256; int numBlocks = (width * height + blockSize - 1) / blockSize; Vec3f *image; cudaMallocManaged(&image, height * width * sizeof(Vec3f)); // Trace rays clock_t begin = clock(); fill_image<<<numBlocks, blockSize>>>(spheres, spheresSize, image, width, height); cudaDeviceSynchronize(); clock_t end = clock(); printf("Elapsed time: %lf\n", double(end - begin) / CLOCKS_PER_SEC); // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i < width * height; ++i) { ofs << (unsigned char)(std::min(float(1), image[i].x) * 255) << (unsigned char)(std::min(float(1), image[i].y) * 255) << (unsigned char)(std::min(float(1), image[i].z) * 255); } ofs.close(); cudaFree(image); } // In the main function, we will create the scene which is composed of 5 spheres // and 1 light (which is also a sphere). Then, once the scene description is complete // we render that scene, by calling the render() function. int main(int argc, char **argv) { srand(13); int spheresSize = 7; Sphere *spheres; cudaMallocManaged(&spheres, spheresSize * sizeof(Sphere)); // position, radius, surface color, reflectivity, transparency, emission color spheres[0] = Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0); spheres[1] = Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5); spheres[2] = Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 0.76, 0.46), 1, 0.0); spheres[3] = Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0); spheres[4] = Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0); spheres[5] = Sphere(Vec3f(3, 5, -30), 1, Vec3f(0.3, 0.50, 0.20), 1, 0.0); // light spheres[6] = Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3)); render(spheres, spheresSize); cudaFree(spheres); return 0; }
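The CUDA render() above times the kernel with clock() wrapped around cudaDeviceSynchronize(), which measures host wall time. A minimal sketch of the same measurement done with CUDA events instead; the dummy kernel and sizes below are placeholders, not taken from the raytracer:

#include <cstdio>

__global__ void dummy_kernel(float* out, int n) {
  // Stand-in for fill_image: one output element per thread.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = 2.0f * i;
}

int main() {
  const int n = 640 * 480;      // mirrors the raytracer's image size
  const int blockSize = 256;
  float* out = nullptr;
  cudaMalloc(&out, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  // Record events around the launch; the elapsed time covers the GPU work
  // between the two events rather than host-side overhead.
  cudaEventRecord(start);
  dummy_kernel<<<(n + blockSize - 1) / blockSize, blockSize>>>(out, n);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  printf("Kernel time: %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(out);
  return 0;
}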
7794b30178796384545f960b7fae92e7e97463f1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THH/THHAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return ::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) 
weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int 
deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * 
height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } namespace detectron2 { void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_im2col: %s\n", hipGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; 
at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in deformable_col2im: %s\n", hipGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); hipLaunchKernelGGL(( deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } } // namespace detectron2 template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t 
dmcn_get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - 
pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + 
h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * 
height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } namespace detectron2 { void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_im.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_im2col_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", hipGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::hip::HIPGuardMasqueradingAsCUDA device_guard(data_col.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); hipLaunchKernelGGL(( modulated_deformable_col2im_coord_gpu_kernel), dim3(GET_BLOCKS(num_kernels)), dim3(CUDA_NUM_THREADS), 0, stream, num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", hipGetErrorString(err)); } } } // namespace detectron2
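Every kernel in the deformable-convolution file above is launched with GET_BLOCKS, which caps the grid at kMaxGridNum, and iterates with CUDA_KERNEL_LOOP, a grid-stride loop that lets the capped grid still cover all num_kernels work items. A standalone sketch of that pattern under the same HIP conventions; the scale_kernel, helper names and sizes are illustrative, not from the file:

#include "hip/hip_runtime.h"
#include <algorithm>
#include <cstdio>

#define KERNEL_LOOP(i, n)                                       \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n);  \
       i += blockDim.x * gridDim.x)

const int NUM_THREADS = 256;   // plays the role of CUDA_NUM_THREADS
const int MAX_GRID = 65535;    // plays the role of kMaxGridNum

inline int get_blocks(int n) {
  return std::min(MAX_GRID, (n + NUM_THREADS - 1) / NUM_THREADS);
}

__global__ void scale_kernel(int n, const float* in, float* out) {
  // Grid-stride loop: the capped grid sweeps the whole range in strides of
  // blockDim.x * gridDim.x, exactly like CUDA_KERNEL_LOOP above.
  KERNEL_LOOP(i, n) {
    out[i] = 2.0f * in[i];
  }
}

int main() {
  const int n = 1 << 20;
  float *in = nullptr, *out = nullptr;
  hipMallocManaged(&in, n * sizeof(float));
  hipMallocManaged(&out, n * sizeof(float));
  for (int i = 0; i < n; ++i) in[i] = float(i);

  hipLaunchKernelGGL(scale_kernel, dim3(get_blocks(n)), dim3(NUM_THREADS),
                     0, 0, n, in, out);
  hipDeviceSynchronize();

  printf("out[123] = %f\n", out[123]);
  hipFree(in);
  hipFree(out);
  return 0;
}

With 2^20 elements and 256 threads per block the grid stays well under the 65535 cap, but the same loop keeps the kernel correct when n grows past MAX_GRID * NUM_THREADS.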
7794b30178796384545f960b7fae92e7e97463f1.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved // modified from // https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu // Original license: Apache 2.0 // clang-format off // modify from // https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu /*! ******************* BEGIN Caffe Copyright Notice and Disclaimer ***************** * * COPYRIGHT * * All contributions by the University of California: * Copyright (c) 2014-2017 The Regents of the University of California (Regents) * All rights reserved. * * All other contributions: * Copyright (c) 2014-2017, the respective contributors * All rights reserved. * * Caffe uses a shared copyright model: each contributor holds copyright over * their contributions to Caffe. The project versioning records all such * contribution and copyright details. If a contributor wants to further mark * their specific copyright on a particular contribution, they should indicate * their copyright solely in the commit message of the change when it is * committed. * * LICENSE * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * CONTRIBUTION AGREEMENT * * By contributing to the BVLC/caffe repository through pull-request, comment, * or otherwise, the contributor releases their content to the * license and copyright terms herein. * ***************** END Caffe Copyright Notice and Disclaimer ********************* * * Copyright (c) 2018 Microsoft * Licensed under The MIT License [see LICENSE for details] * \file modulated_deformable_im2col.cuh * \brief Function definitions of converting an image to * column matrix based on kernel, padding, dilation, and offset. * These functions are mainly used in deformable convolution operators. 
* \ref: https://arxiv.org/abs/1703.06211 * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng */ #include <ATen/ATen.h> #include <c10/cuda/CUDAGuard.h> #include <float.h> #include <math.h> #include <stdio.h> #include <THC/THCAtomics.cuh> using namespace at; #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ i += blockDim.x * gridDim.x) namespace { const int CUDA_NUM_THREADS = 1024; const int kMaxGridNum = 65535; inline int GET_BLOCKS(const int N) { return std::min(kMaxGridNum, (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS); } } template <typename scalar_t> __device__ scalar_t deformable_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low 
+ 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const scalar_t* data_im_ptr = data_im + ((b_col * num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const scalar_t map_h = i * dilation_h + offset_h; // const scalar_t map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = deformable_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = deformable_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename scalar_t> __global__ void deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int 
height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index]; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; 
const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } const scalar_t weight = get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[index] = val; } } namespace detectron2 { void deformable_im2col( const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size // todo: check parallel_imgs is correctly passed in int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); } } void deformable_col2im( const at::Tensor data_col, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_im) { // todo: make sure parallel_imgs is passed in correctly int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * ksize_h * ksize_w * height_col * width_col * parallel_imgs; int channel_per_deformable_group = channels / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = 
at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in deformable_col2im: %s\n", cudaGetErrorString(err)); } } void deformable_col2im_coord( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const int channels, const int height, const int width, const int ksize_h, const int ksize_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int parallel_imgs, const int deformable_group, at::Tensor grad_offset) { int height_col = (height + 2 * pad_h - (dilation_h * (ksize_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (ksize_w - 1) + 1)) / stride_w + 1; int num_kernels = height_col * width_col * 2 * ksize_h * ksize_w * deformable_group * parallel_imgs; int channel_per_deformable_group = channels * ksize_h * ksize_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, channels, height, width, ksize_h, ksize_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, parallel_imgs, 2 * ksize_h * ksize_w * deformable_group, deformable_group, height_col, width_col, grad_offset_); })); } } // namespace detectron2 template <typename scalar_t> __device__ scalar_t dmcn_im2col_bilinear( const scalar_t* bottom_data, const int data_width, const int height, const int width, scalar_t h, scalar_t w) { int h_low = floor(h); int w_low = floor(w); int h_high = h_low + 1; int w_high = w_low + 1; scalar_t lh = h - h_low; scalar_t lw = w - w_low; scalar_t hh = 1 - lh, hw = 1 - lw; scalar_t v1 = 0; if (h_low >= 0 && w_low >= 0) v1 = bottom_data[h_low * data_width + w_low]; scalar_t v2 = 0; if (h_low >= 0 && w_high <= width - 1) v2 = bottom_data[h_low * data_width + w_high]; scalar_t v3 = 0; if (h_high <= height - 1 && w_low >= 0) v3 = bottom_data[h_high * data_width + w_low]; scalar_t v4 = 0; if (h_high <= height - 1 && w_high <= width - 1) v4 = bottom_data[h_high * data_width + w_high]; scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename scalar_t> __device__ scalar_t dmcn_get_gradient_weight( scalar_t argmax_h, scalar_t argmax_w, const int h, const int w, const int height, const int width) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { 
// empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (h == argmax_h_low && w == argmax_w_low) weight = (h + 1 - argmax_h) * (w + 1 - argmax_w); if (h == argmax_h_low && w == argmax_w_high) weight = (h + 1 - argmax_h) * (argmax_w + 1 - w); if (h == argmax_h_high && w == argmax_w_low) weight = (argmax_h + 1 - h) * (w + 1 - argmax_w); if (h == argmax_h_high && w == argmax_w_high) weight = (argmax_h + 1 - h) * (argmax_w + 1 - w); return weight; } template <typename scalar_t> __device__ scalar_t dmcn_get_coordinate_weight( scalar_t argmax_h, scalar_t argmax_w, const int height, const int width, const scalar_t* im_data, const int data_width, const int bp_dir) { if (argmax_h <= -1 || argmax_h >= height || argmax_w <= -1 || argmax_w >= width) { // empty return 0; } int argmax_h_low = floor(argmax_h); int argmax_w_low = floor(argmax_w); int argmax_h_high = argmax_h_low + 1; int argmax_w_high = argmax_w_low + 1; scalar_t weight = 0; if (bp_dir == 0) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += -1 * (argmax_w - argmax_w_low) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += (argmax_w_low + 1 - argmax_w) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_w - argmax_w_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } else if (bp_dir == 1) { if (argmax_h_low >= 0 && argmax_w_low >= 0) weight += -1 * (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_low]; if (argmax_h_low >= 0 && argmax_w_high <= width - 1) weight += (argmax_h_low + 1 - argmax_h) * im_data[argmax_h_low * data_width + argmax_w_high]; if (argmax_h_high <= height - 1 && argmax_w_low >= 0) weight += -1 * (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_low]; if (argmax_h_high <= height - 1 && argmax_w_high <= width - 1) weight += (argmax_h - argmax_h_low) * im_data[argmax_h_high * data_width + argmax_w_high]; } return weight; } template <typename scalar_t> __global__ void modulated_deformable_im2col_gpu_kernel( const int n, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* data_col) { CUDA_KERNEL_LOOP(index, n) { // index index of output matrix const int w_col = index % width_col; const int h_col = (index / width_col) % height_col; const int b_col = (index / width_col / height_col) % batch_size; const int c_im = (index / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; // compute deformable group index const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; scalar_t* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; // const float* data_im_ptr = data_im + ((b_col * 
num_channels + c_im) * // height + h_in) * width + w_in; const scalar_t* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const scalar_t* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b_col * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_col) * width_col + w_col; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; scalar_t val = static_cast<scalar_t>(0); const scalar_t h_im = h_in + i * dilation_h + offset_h; const scalar_t w_im = w_in + j * dilation_w + offset_w; // if (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) { if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { // const float map_h = i * dilation_h + offset_h; // const float map_w = j * dilation_w + offset_w; // const int cur_height = height - h_in; // const int cur_width = width - w_in; // val = dmcn_im2col_bilinear(data_im_ptr, width, cur_height, // cur_width, map_h, map_w); val = dmcn_im2col_bilinear( data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val * mask; data_col_ptr += batch_size * height_col * width_col; // data_col_ptr += height_col * width_col; } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_im) { CUDA_KERNEL_LOOP(index, n) { const int j = (index / width_col / height_col / batch_size) % kernel_w; const int i = (index / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = index / width_col / height_col / batch_size / kernel_w / kernel_h; // compute the start and end of the output const int deformable_group_index = c / channel_per_deformable_group; int w_out = index % width_col; int h_out = (index / width_col) % height_col; int b = (index / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const int data_mask_hw_ptr = ((i * kernel_w + j) * height_col + h_out) * width_col + w_out; const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = 
data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = data_mask_ptr[data_mask_hw_ptr]; const scalar_t cur_inv_h_data = h_in + i * dilation_h + offset_h; const scalar_t cur_inv_w_data = w_in + j * dilation_w + offset_w; const scalar_t cur_top_grad = data_col[index] * mask; const int cur_h = (int)cur_inv_h_data; const int cur_w = (int)cur_inv_w_data; for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; scalar_t weight = dmcn_get_gradient_weight( cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); atomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename scalar_t> __global__ void modulated_deformable_col2im_coord_gpu_kernel( const int n, const scalar_t* data_col, const scalar_t* data_im, const scalar_t* data_offset, const scalar_t* data_mask, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, scalar_t* grad_offset, scalar_t* grad_mask) { CUDA_KERNEL_LOOP(index, n) { scalar_t val = 0, mval = 0; int w = index % width_col; int h = (index / width_col) % height_col; int c = (index / width_col / height_col) % offset_channels; int b = (index / width_col / height_col) / offset_channels; // compute the start and end of the output const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const scalar_t* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const scalar_t* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const scalar_t* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const scalar_t* data_mask_ptr = data_mask + (b * deformable_group + deformable_group_index) * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = (offset_c / 2); col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const int data_mask_hw_ptr = (((i * kernel_w + j) * height_col + h_out) * width_col + w_out); const scalar_t offset_h = data_offset_ptr[data_offset_h_ptr]; const scalar_t offset_w = data_offset_ptr[data_offset_w_ptr]; const scalar_t mask = 
data_mask_ptr[data_mask_hw_ptr]; scalar_t inv_h = h_in + i * dilation_h + offset_h; scalar_t inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * dmcn_im2col_bilinear( data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const scalar_t weight = dmcn_get_coordinate_weight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos] * mask; cnt += 1; } // KERNEL_ASSIGN(grad_offset[index], offset_req, val); grad_offset[index] = val; if (offset_c % 2 == 0) // KERNEL_ASSIGN(grad_mask[(((b * deformable_group + // deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * // height_col + h) * width_col + w], mask_req, mval); grad_mask [(((b * deformable_group + deformable_group_index) * kernel_h * kernel_w + offset_c / 2) * height_col + h) * width_col + w] = mval; } } namespace detectron2 { void modulated_deformable_im2col_cuda( const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kenerl_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor data_col) { // num_axes should be smaller than block size const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_im.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_im.type(), "modulated_deformable_im2col_gpu", ([&] { const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); modulated_deformable_im2col_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_im_, data_offset_, data_mask_, height_im, width_im, kernel_h, kenerl_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, channels, deformable_group, height_col, width_col, data_col_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_im2col_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_cuda( const at::Tensor data_col, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_im) { const int channel_per_deformable_group = channels / deformable_group; const int num_kernels = channels * kernel_h * kernel_w * batch_size * height_col * width_col; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const 
scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_im_ = grad_im.data_ptr<scalar_t>(); modulated_deformable_col2im_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, deformable_group, height_col, width_col, grad_im_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_cuda: %s\n", cudaGetErrorString(err)); } } void modulated_deformable_col2im_coord_cuda( const at::Tensor data_col, const at::Tensor data_im, const at::Tensor data_offset, const at::Tensor data_mask, const int batch_size, const int channels, const int height_im, const int width_im, const int height_col, const int width_col, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int deformable_group, at::Tensor grad_offset, at::Tensor grad_mask) { const int num_kernels = batch_size * height_col * width_col * 2 * kernel_h * kernel_w * deformable_group; const int channel_per_deformable_group = channels * kernel_h * kernel_w / deformable_group; at::cuda::CUDAGuard device_guard(data_col.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( data_col.type(), "modulated_deformable_col2im_coord_gpu", ([&] { const scalar_t* data_col_ = data_col.data_ptr<scalar_t>(); const scalar_t* data_im_ = data_im.data_ptr<scalar_t>(); const scalar_t* data_offset_ = data_offset.data_ptr<scalar_t>(); const scalar_t* data_mask_ = data_mask.data_ptr<scalar_t>(); scalar_t* grad_offset_ = grad_offset.data_ptr<scalar_t>(); scalar_t* grad_mask_ = grad_mask.data_ptr<scalar_t>(); modulated_deformable_col2im_coord_gpu_kernel<<< GET_BLOCKS(num_kernels), CUDA_NUM_THREADS, 0, stream>>>( num_kernels, data_col_, data_im_, data_offset_, data_mask_, channels, height_im, width_im, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, channel_per_deformable_group, batch_size, 2 * kernel_h * kernel_w * deformable_group, deformable_group, height_col, width_col, grad_offset_, grad_mask_); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf( "error in modulated_deformable_col2im_coord_cuda: %s\n", cudaGetErrorString(err)); } } } // namespace detectron2
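// The wrappers above all use the same launch recipe: GET_BLOCKS() caps the
// grid at kMaxGridNum and CUDA_KERNEL_LOOP() turns the kernel body into a
// grid-stride loop, so a single launch still covers all num_kernels logical
// indices even when the grid is clamped. Below is a minimal, self-contained
// sketch of that pattern only (a hypothetical example; the names square_inplace
// and blocks_for are made up and are not part of the detectron2 sources above).
#include <algorithm>
#include <cstdio>
#include <cuda_runtime.h>

namespace sketch {

const int kThreads = 1024;
const int kMaxGrid = 65535;

// Same role as GET_BLOCKS(): enough blocks for n items, clamped to the cap.
inline int blocks_for(int n) {
  return std::min(kMaxGrid, (n + kThreads - 1) / kThreads);
}

// Grid-stride loop: each thread handles i, i + blockDim.x*gridDim.x, ...
__global__ void square_inplace(int n, float* x) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x) {
    x[i] = x[i] * x[i];
  }
}

}  // namespace sketch

int main() {
  const int n = 1 << 20;
  float* d_x = 0;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));
  sketch::square_inplace<<<sketch::blocks_for(n), sketch::kThreads>>>(n, d_x);
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) printf("launch error: %s\n", cudaGetErrorString(err));
  cudaDeviceSynchronize();
  cudaFree(d_x);
  return 0;
}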
b8c0b3518c9d585663fe6bd966cc5637b3be0ba7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function avg-pools an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output */ __global__ void subsample(float *input, float *output, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = sum/float(kW*kH); } } } static int cunn_SpatialAveragePooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); float *output_data; float *input_data; luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; long nInputPlane = input->size[0]; luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; long nInputPlane = input->size[1]; luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel hipLaunchKernelGGL(( subsample) , dim3(blocks), dim3(threads), 0, 0, input_data, output_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } // clean THCudaTensor_free(state, input); // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialAveragePooling.updateOutput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } /* * Description: * this function computes the gradInput from gradOutput */ __global__ void subgradinput(float *gradInput, float *gradOutput, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z / float(kW*kH); ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from gradOutput * but with an atomic accumulation. It is needed to be done so * for cases of kH != dH and kW != dW */ __global__ void subgradinputAtomic(float *gradInput, float *gradOutput, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z / float(kW*kH)); } ptr_gradInput += input_w; } } } } static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nInputPlane = input->size[0]; float *gradOutput_data = 
THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH == dH && kW == dW) { hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nInputPlane = input->size[1]; long nbatch = input->size[0]; float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH == dH && kW == dW) { hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } // check for errors hipError_t err = hipGetLastError(); if (err != hipSuccess) { printf("error in SpatialAveragePooling.updateGradInput: %s\n", hipGetErrorString(err)); THError("aborting"); } return 1; } static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = { {"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput}, {"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput}, {NULL, NULL} }; static void cunn_SpatialAveragePooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn"); lua_pop(L,1); } #undef CUDA_MAX_THREADS
b8c0b3518c9d585663fe6bd966cc5637b3be0ba7.cu
#include "utils.h" #define CUDA_MAX_THREADS 1024 // this is safe, in reality 256 is our limit /* * Description: * this function avg-pools an input 3D tensor along dimensions 1 and 2 * 3D input, 3D output */ __global__ void subsample(float *input, float *output, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane output = output + o*output_w*output_h; input = input + i*input_w*input_h; // For all output pixels... for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { // Compute the mean of the input image... float *ptr_input = input + yy*dH*input_w + xx*dW; float *ptr_output = output + yy*output_w + xx; float sum = 0; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) sum += ptr_input[kx]; ptr_input += input_w; // next input line } // Update output *ptr_output = sum/float(kW*kH); } } } static int cunn_SpatialAveragePooling_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); float *output_data; float *input_data; luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; long nInputPlane = input->size[0]; luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize3d(state, output, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run subsample kernel subsample <<<blocks, threads>>> (input_data, output_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nbatch = input->size[0]; long nOutputCols = (nInputCols - kW) / dW + 1; long nOutputRows = (nInputRows - kH) / dH + 1; long nInputPlane = input->size[1]; luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size"); input = THCudaTensor_newContiguous(state, input); input_data = THCudaTensor_data(state, input); THCudaTensor_resize4d(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols); output_data = THCudaTensor_data(state, output); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 
1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run subsample kernel subsample <<<blocks, threads>>> (input_data, output_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } // clean THCudaTensor_free(state, input); // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialAveragePooling.updateOutput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } /* * Description: * this function computes the gradInput from gradOutput */ __global__ void subgradinput(float *gradInput, float *gradOutput, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) ptr_gradInput[kx] += z / float(kW*kH); ptr_gradInput += input_w; } } } } /* * Description: * this function computes the gradInput from gradOutput * but with an atomic accumulation. It is needed to be done so * for cases of kH != dH and kW != dW */ __global__ void subgradinputAtomic(float *gradInput, float *gradOutput, int input_n, int input_h, int input_w, int kH, int kW, int dH, int dW) { // iterators int xx, yy; // output size int output_w = (input_w - kW) / dW + 1; int output_h = (input_h - kH) / dH + 1; // compute offsets based on thread/block ID int o = blockIdx.x; int i = o; int xx_start = threadIdx.x; int xx_end = output_w; int xx_step = blockDim.x; int yy_start = blockDim.y*blockIdx.y + threadIdx.y; int yy_end = output_h; int yy_step = blockDim.y*gridDim.y; // select input/output plane gradOutput = gradOutput + o*output_w*output_h; gradInput = gradInput + i*input_w*input_h; // compute gradInput for(yy = yy_start; yy < yy_end; yy+=yy_step) { for(xx = xx_start; xx < xx_end; xx+=xx_step) { float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW; float *ptr_gradOutput = gradOutput + yy*output_w + xx; float z = *ptr_gradOutput; int kx, ky; for(ky = 0; ky < kH; ky++) { for(kx = 0; kx < kW; kx++) { atomicAdd(&(ptr_gradInput[kx]), z / float(kW*kH)); } ptr_gradInput += input_w; } } } } static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); int kW = luaT_getfieldcheckint(L, 1, "kW"); int kH = luaT_getfieldcheckint(L, 1, "kH"); int dW = luaT_getfieldcheckint(L, 1, "dW"); int dH = luaT_getfieldcheckint(L, 1, "dH"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); if (input->nDimension == 3) { long nInputCols = input->size[2]; long nInputRows = input->size[1]; long nInputPlane = input->size[0]; float *gradOutput_data = THCudaTensor_data(state, 
gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH == dH && kW == dW) { subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { subgradinputAtomic <<<blocks, threads>>> (gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } else { long nInputCols = input->size[3]; long nInputRows = input->size[2]; long nInputPlane = input->size[1]; long nbatch = input->size[0]; float *gradOutput_data = THCudaTensor_data(state, gradOutput); float *gradInput_data; THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); gradInput_data = THCudaTensor_data(state, gradInput); // cuda blocks & threads: int yblocks = (int)(16L / nInputPlane); yblocks = yblocks < 1 ? 1 : yblocks; dim3 blocks(nInputPlane*nbatch,yblocks); dim3 threads(32,8); // run updateGradInput kernel if (kH == dH && kW == dW) { subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } else { subgradinputAtomic <<<blocks, threads>>> (gradInput_data, gradOutput_data, nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW); } } // check for errors cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) { printf("error in SpatialAveragePooling.updateGradInput: %s\n", cudaGetErrorString(err)); THError("aborting"); } return 1; } static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = { {"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput}, {"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput}, {NULL, NULL} }; static void cunn_SpatialAveragePooling_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn"); lua_pop(L,1); } #undef CUDA_MAX_THREADS
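// Both pooling kernels above use the same launch geometry: one input plane
// (or plane*batch slice) per blockIdx.x, a fixed 32x8 thread block, and the
// output rows tiled across blockIdx.y, with yblocks = max(1, 16/nInputPlane).
// A small, self-contained sketch of that host-side arithmetic (hypothetical
// helper; PoolGeom and average_pool_geometry are made-up names, and the
// output-size formula is the same (in - k)/stride + 1 used by the kernels):
#include <cstdio>
#include <cuda_runtime.h>

struct PoolGeom {
  long out_h, out_w;    // pooled output size
  dim3 blocks, threads; // launch configuration
};

static PoolGeom average_pool_geometry(long planes, long in_h, long in_w,
                                      int kH, int kW, int dH, int dW) {
  PoolGeom g;
  g.out_h = (in_h - kH) / dH + 1;
  g.out_w = (in_w - kW) / dW + 1;
  int yblocks = (int)(16L / planes);
  if (yblocks < 1) yblocks = 1;
  g.blocks  = dim3((unsigned)planes, (unsigned)yblocks);
  g.threads = dim3(32, 8);
  return g;
}

int main() {
  // e.g. a 3-plane 32x32 input, 2x2 pooling with stride 2:
  PoolGeom g = average_pool_geometry(3, 32, 32, 2, 2, 2, 2);
  printf("output %ldx%ld, blocks (%u,%u), threads (%u,%u)\n",
         g.out_h, g.out_w, g.blocks.x, g.blocks.y, g.threads.x, g.threads.y);
  // prints: output 16x16, blocks (3,5), threads (32,8)
  return 0;
}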
c60c4da635724a337eecafff4af6e8d41d475526.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // statistical kernel #define NUMBER_THREADS 512 __global__ void reduce222( long d_Ne, // number of elements in array int d_no, // number of sums to reduce int d_mul, // increment float *d_sums, // pointer to partial sums variable (DEVICE GLOBAL MEMORY) float *d_sums2){ // indexes int bx = blockIdx.x; // get current horizontal block index int tx = threadIdx.x; // get current horizontal thread index int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!! int nf = NUMBER_THREADS-(gridDim.x*NUMBER_THREADS-d_no); // number of elements assigned to last block int df = 0; // divisibility factor for the last block // statistical __shared__ float d_psum[NUMBER_THREADS]; // data for block calculations allocated by every block in its shared memory __shared__ float d_psum2[NUMBER_THREADS]; // counters int i; // copy data to shared memory if(ei<d_no){ // do only for the number of elements, omit extra threads d_psum[tx] = d_sums[ei*d_mul]; d_psum2[tx] = d_sums2[ei*d_mul]; } // Lingjie Zhang modifited at Nov 1 / 2015 __syncthreads(); // end Lingjie Zhang's modification // reduction of sums if all blocks are full (rare case) if(nf == NUMBER_THREADS){ // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=NUMBER_THREADS; i=2*i){ // sum of elements if((tx+1) % i == 0){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization __syncthreads(); } // final sumation by last thread in every block if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } // reduction of sums if last block is not full (common case) else{ // for full blocks (all except for last block) if(bx != (gridDim.x - 1)){ // // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=NUMBER_THREADS; i=2*i){ // // sum of elements if((tx+1) % i == 0){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization __syncthreads(); // } // final sumation by last thread in every block if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } // for not full block (last block) else{ // // figure out divisibility for(i=2; i<=NUMBER_THREADS; i=2*i){ // if(nf >= i){ df = i; } } // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=df; i=2*i){ // // sum of elements (only busy threads) if((tx+1) % i == 0 && tx<df){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization (all threads) __syncthreads(); // } // remainder / final summation by last thread if(tx==(df-1)){ // // compute the remainder and final summation by last busy thread for(i=(bx*NUMBER_THREADS)+df; i<(bx*NUMBER_THREADS)+nf; i++){ // d_psum[tx] = d_psum[tx] + d_sums[i]; d_psum2[tx] = d_psum2[tx] + d_sums2[i]; } // final sumation by last thread in every block d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } } }
c60c4da635724a337eecafff4af6e8d41d475526.cu
// statistical kernel #define NUMBER_THREADS 512 __global__ void reduce222( long d_Ne, // number of elements in array int d_no, // number of sums to reduce int d_mul, // increment float *d_sums, // pointer to partial sums variable (DEVICE GLOBAL MEMORY) float *d_sums2){ // indexes int bx = blockIdx.x; // get current horizontal block index int tx = threadIdx.x; // get current horizontal thread index int ei = (bx*NUMBER_THREADS)+tx; // unique thread id, more threads than actual elements !!! int nf = NUMBER_THREADS-(gridDim.x*NUMBER_THREADS-d_no); // number of elements assigned to last block int df = 0; // divisibility factor for the last block // statistical __shared__ float d_psum[NUMBER_THREADS]; // data for block calculations allocated by every block in its shared memory __shared__ float d_psum2[NUMBER_THREADS]; // counters int i; // copy data to shared memory if(ei<d_no){ // do only for the number of elements, omit extra threads d_psum[tx] = d_sums[ei*d_mul]; d_psum2[tx] = d_sums2[ei*d_mul]; } // Lingjie Zhang modifited at Nov 1 / 2015 __syncthreads(); // end Lingjie Zhang's modification // reduction of sums if all blocks are full (rare case) if(nf == NUMBER_THREADS){ // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=NUMBER_THREADS; i=2*i){ // sum of elements if((tx+1) % i == 0){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization __syncthreads(); } // final sumation by last thread in every block if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } // reduction of sums if last block is not full (common case) else{ // for full blocks (all except for last block) if(bx != (gridDim.x - 1)){ // // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=NUMBER_THREADS; i=2*i){ // // sum of elements if((tx+1) % i == 0){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization __syncthreads(); // } // final sumation by last thread in every block if(tx==(NUMBER_THREADS-1)){ // block result stored in global memory d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } // for not full block (last block) else{ // // figure out divisibility for(i=2; i<=NUMBER_THREADS; i=2*i){ // if(nf >= i){ df = i; } } // sum of every 2, 4, ..., NUMBER_THREADS elements for(i=2; i<=df; i=2*i){ // // sum of elements (only busy threads) if((tx+1) % i == 0 && tx<df){ // every ith d_psum[tx] = d_psum[tx] + d_psum[tx-i/2]; d_psum2[tx] = d_psum2[tx] + d_psum2[tx-i/2]; } // synchronization (all threads) __syncthreads(); // } // remainder / final summation by last thread if(tx==(df-1)){ // // compute the remainder and final summation by last busy thread for(i=(bx*NUMBER_THREADS)+df; i<(bx*NUMBER_THREADS)+nf; i++){ // d_psum[tx] = d_psum[tx] + d_sums[i]; d_psum2[tx] = d_psum2[tx] + d_sums2[i]; } // final sumation by last thread in every block d_sums[bx*d_mul*NUMBER_THREADS] = d_psum[tx]; d_sums2[bx*d_mul*NUMBER_THREADS] = d_psum2[tx]; } } } }
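// reduce222 above collapses d_no partial sums (spaced d_mul apart) down to one
// partial sum per block and stores it back in place; the caller is expected to
// relaunch it with a growing d_mul until a single value remains. Below is a
// minimal, self-contained sketch of that multi-launch reduction idea, using a
// plain shared-memory block sum and a ping-pong buffer instead of the in-place,
// stride-based scheme above (hypothetical example, not the SRAD host driver;
// block_sum and reduce_on_device are made-up names).
#include <cstdio>
#include <cuda_runtime.h>

#define THREADS 512 // power of two, so the tree reduction below works

__global__ void block_sum(const float* in, float* out, int n) {
  __shared__ float s[THREADS];
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + tid;
  s[tid] = (i < n) ? in[i] : 0.0f; // pad the last block with zeros
  __syncthreads();
  // classic tree reduction inside the block
  for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
    if (tid < stride) s[tid] += s[tid + stride];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = s[0];
}

// Repeatedly reduce until one element is left (clobbers d_data; assumes n >= 1).
float reduce_on_device(float* d_data, int n) {
  float* d_tmp = 0;
  cudaMalloc(&d_tmp, ((n + THREADS - 1) / THREADS) * sizeof(float));
  float* src = d_data;
  float* dst = d_tmp;
  while (n > 1) {
    int blocks = (n + THREADS - 1) / THREADS;
    block_sum<<<blocks, THREADS>>>(src, dst, n);
    n = blocks;
    float* t = src; src = dst; dst = t; // ping-pong the buffers
  }
  float result = 0.0f;
  cudaMemcpy(&result, src, sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_tmp);
  return result;
}

int main() {
  const int n = 1 << 20;
  float* h = new float[n];
  for (int i = 0; i < n; ++i) h[i] = 1.0f;
  float* d = 0;
  cudaMalloc(&d, n * sizeof(float));
  cudaMemcpy(d, h, n * sizeof(float), cudaMemcpyHostToDevice);
  printf("sum = %f (expected %d)\n", reduce_on_device(d, n), n);
  cudaFree(d);
  delete[] h;
  return 0;
}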
f42fb0f105bb6c168ea710d69dc4390476cc449b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>

const long N = 1000000;

struct timeval start, end;

void starttime() {
  gettimeofday(&start, 0);
}

void endtime(const char* c) {
  gettimeofday(&end, 0);
  double elapsed = (end.tv_sec - start.tv_sec) * 1000.0 + (end.tv_usec - start.tv_usec) / 1000.0;
  printf("%s: %f ms\n", c, elapsed);
}

void init(const char* c) {
  printf("***************** %s **********************\n", c);
  printf("Running %s...\n", c);
  starttime();
}

void finish(int a, long N, const char* c) {
  endtime(c);
  printf("Done.\n");
  printf("\nThere are %d prime numbers between 1 and %ld.\n", a, N);
  printf("***************************************************\n");
}

int normal(int a, long N) {
  long low = 2, high = N, i, check;
  // printf("Prime numbers between 1 and %d are: ", high);
  while (low < high) {
    check = 0;
    for (i = 2; i <= low / 2; ++i) {
      if (low % i == 0) {
        check = 1;
        break;
      }
    }
    if (check == 0)
      ++a;
    // printf("%d ", low);
    ++low;
  }
  return a;
}

// GPU function to count prime numbers.
// Every thread on every core runs this function.
__global__ void gpu_prime(int* a, long N) {
  // One element per thread on each core:
  //   blockIdx.x  = Core #
  //   blockDim.x  = Threads per core
  //   threadIdx.x = Thread #
  // The formula below makes sure the value of element
  // is different on every thread on every core.
  long element = blockIdx.x * blockDim.x + threadIdx.x;

  // If there is not an even split, some threads will be out of bounds.
  // We just let those do nothing; the rest count the prime numbers.
  if (element <= N && element >= 2) {
    int check = 0;
    for (int i = 2; i <= element / 2; ++i) {
      if (element % i == 0) {
        check = 1;
        break;
      }
    }
    if (check == 0) {
      atomicAdd(a, 1);
    }
  }
}

void gpu(int* a, long N) {
  int threadsPerCore = 512; // This can vary, up to 1024
  long numCores = N / threadsPerCore + 1;

  // Memory must be on the graphics card
  int* gpuA;
  hipMalloc(&gpuA, sizeof(int)); // Allocate enough memory on the GPU
  hipMemcpy(gpuA, a, sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL((gpu_prime), dim3(numCores), dim3(threadsPerCore), 0, 0, gpuA, N);
  hipMemcpy(a, gpuA, sizeof(int), hipMemcpyDeviceToHost);
  hipFree(gpuA); // Free the memory on the GPU
}

int main() {
  int a = 1;

  // Test 1: Sequential For Loop
  init("Normal");
  a = normal(a, N);
  finish(a, N, "Normal");

  // Test 2: GPU
  a = 1;
  init("GPU");
  gpu(&a, N);
  finish(a, N, "GPU");

  return 0;
}
f42fb0f105bb6c168ea710d69dc4390476cc449b.cu
#include <emmintrin.h>
#include <sys/time.h>
#include <stdio.h>

const long N = 1000000;

struct timeval start, end;

void starttime() {
  gettimeofday( &start, 0 );
}

void endtime(const char* c) {
  gettimeofday( &end, 0 );
  double elapsed = ( end.tv_sec - start.tv_sec ) * 1000.0 + ( end.tv_usec - start.tv_usec ) / 1000.0;
  printf("%s: %f ms\n", c, elapsed);
}

void init(const char* c) {
  printf("***************** %s **********************\n", c);
  printf("Running %s...\n", c);
  starttime();
}

void finish(int a, long N, const char* c) {
  endtime(c);
  printf("Done.\n");
  printf("\nThere are %d Prime numbers between 1 and %ld.\n", a, N);   // a is an int, so %d (not %ld)
  printf("***************************************************\n");
}

int normal(int a, long N) {
  long low = 2, high = N, i, check;
  // printf("Prime numbers between 1 and %d are: ",high);
  while (low < high) {
    check = 0;
    for(i = 2; i <= low/2; ++i) {
      if(low % i == 0) {
        check = 1;
        break;
      }
    }
    if (check == 0) ++a;   //printf("%d ", low);
    ++low;
  }
  return a;
}

// GPU function to count prime numbers
// Every thread on every core runs this function
__global__ void gpu_prime(int* a, long N) {
  // One element per thread on each core
  // blockIdx.x = Core #
  // blockDim.x = Threads per core
  // threadIdx.x = Thread #
  // The formula below makes sure the value of element
  // is different on every thread on every core
  long element = blockIdx.x*blockDim.x + threadIdx.x;
  // If there is not an even split, some threads will be
  // out of bounds
  // We just let those do nothing
  // The rest count the prime numbers
  if (element <= N && element >= 2) {
    int check = 0;
    for(int i = 2; i <= element/2; ++i) {
      if(element % i == 0) {
        check = 1;
        break;
      }
    }
    if (check == 0){
      atomicAdd(a,1);
    }
  }
}

void gpu(int* a, long N) {
  int threadsPerCore = 512;              // This can vary, up to 1024
  long numCores = N / threadsPerCore + 1;
  // Memory must be on the graphics card
  int* gpuA;
  cudaMalloc(&gpuA, sizeof(int));        // Allocate enough memory on the GPU
  cudaMemcpy(gpuA, a, sizeof(int), cudaMemcpyHostToDevice);
  gpu_prime<<<numCores, threadsPerCore>>>(gpuA, N);
  cudaMemcpy(a, gpuA, sizeof(int), cudaMemcpyDeviceToHost);
  cudaFree(gpuA);                        // Free the memory on the GPU (pass the device pointer, not its address)
}

int main()
{
  int a = 1;

  // Test 1: Sequential For Loop
  init ("Normal");
  a = normal(a, N);
  finish(a, N, "Normal");

  // Test 2: GPU
  a = 1;
  init("GPU");
  gpu(&a, N);
  finish(a, N, "GPU");

  return 0;
}
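gpu_prime issues one global atomicAdd per prime found, so all prime-holding threads contend on the same counter. A common refinement, sketched below for the CUDA version under the same thread layout, accumulates a per-block count in shared memory and issues a single global atomicAdd per block. The kernel name gpu_prime_blockcount is hypothetical and not part of the file above.

// Hypothetical variant of gpu_prime: one global atomicAdd per block instead of one per prime.
__global__ void gpu_prime_blockcount(int* a, long N) {
  __shared__ int blockCount;                    // primes found by this block
  if (threadIdx.x == 0) blockCount = 0;
  __syncthreads();

  long element = blockIdx.x*blockDim.x + threadIdx.x;
  if (element <= N && element >= 2) {
    int check = 0;
    for (long i = 2; i <= element/2; ++i) {
      if (element % i == 0) { check = 1; break; }
    }
    if (check == 0) atomicAdd(&blockCount, 1);  // shared-memory atomic, much cheaper
  }
  __syncthreads();

  if (threadIdx.x == 0 && blockCount > 0)
    atomicAdd(a, blockCount);                   // one global atomic per block
}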
d921c0a3523b4a4c8ecffa1a051054288942ee38.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <smat_cuda/cuda_errors.h> #include <smat_cuda/cuda_context.h> #include <smat_cuda/launch_util.h> #include <smat/vm/instruction_db.h> SM_NAMESPACE_BEGIN template <typename T> __global__ void kernel_apply_mask(T* A, const bool* M, unsigned size) { DECL_KERNEL_VARS for (unsigned i = bdx*bx + tx; i < size; i += bdx*gdx) if (!M[i]) A[i] = (T)0; } void execute_apply_mask(opcode_t opcode, const argument& A, const argument& M) { unsigned size = (unsigned)A.size(); launchcfg cfg = make_elemwise_launchcfg(size); if (A.dtype == f32)hipLaunchKernelGGL(( kernel_apply_mask), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, A.get<float* >(),M.get<bool*>(),(unsigned)size); else if (A.dtype == f64)hipLaunchKernelGGL(( kernel_apply_mask), dim3(cfg.gdim),dim3(cfg.bdim),cfg.smem,cfg.stream, A.get<double*>(),M.get<bool*>(),(unsigned)size); else { SM_UNIMPLEMENTED(); } } SM_NAMESPACE_END
d921c0a3523b4a4c8ecffa1a051054288942ee38.cu
#include <smat_cuda/cuda_errors.h>
#include <smat_cuda/cuda_context.h>
#include <smat_cuda/launch_util.h>
#include <smat/vm/instruction_db.h>

SM_NAMESPACE_BEGIN

template <typename T>
__global__ void kernel_apply_mask(T* A, const bool* M, unsigned size)
{
	DECL_KERNEL_VARS
	for (unsigned i = bdx*bx + tx; i < size; i += bdx*gdx)
		if (!M[i])
			A[i] = (T)0;
}

void execute_apply_mask(opcode_t opcode, const argument& A, const argument& M)
{
	unsigned size = (unsigned)A.size();
	launchcfg cfg = make_elemwise_launchcfg(size);
	if      (A.dtype == f32) kernel_apply_mask<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(A.get<float* >(),M.get<bool*>(),(unsigned)size);
	else if (A.dtype == f64) kernel_apply_mask<<<cfg.gdim,cfg.bdim,cfg.smem,cfg.stream>>>(A.get<double*>(),M.get<bool*>(),(unsigned)size);
	else { SM_UNIMPLEMENTED(); }
}

SM_NAMESPACE_END
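kernel_apply_mask depends on smat's DECL_KERNEL_VARS macro and its launch-configuration helpers, so it cannot be compiled on its own. The standalone CUDA sketch below shows the same grid-stride masking pattern without the library; apply_mask_f32 and the launch configuration are illustrative choices, not part of smat.

// Standalone sketch of the same masking pattern (no smat dependencies, float only).
__global__ void apply_mask_f32(float* A, const bool* M, unsigned size)
{
	// grid-stride loop: each thread visits every (gridDim.x*blockDim.x)-th element
	for (unsigned i = blockDim.x*blockIdx.x + threadIdx.x; i < size; i += blockDim.x*gridDim.x)
		if (!M[i])
			A[i] = 0.0f;
}

// usage (illustrative launch configuration):
// apply_mask_f32<<<(size + 255)/256, 256>>>(d_A, d_M, size);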
6c75fdbf7cc553816729228cb889369d4d164c0b.hip
// !!! This is a file automatically generated by hipify!!!
#define CUB_HALF_OPTIMIZATION 1

#include <benchmark/benchmark.h>

#include <type_traits>
#include <utility>

#include <hip/hip_cooperative_groups.h>

#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"

#include "kernel_hip.cuh"

using namespace wmma_reduction;

template <typename Fun>
struct is_function_ptr
    : std::integral_constant<
          bool, std::is_pointer<Fun>::value and
                    std::is_function<typename std::remove_pointer<Fun>::type>::value> {};

template <typename Arg, typename... Args>
static inline void collect_argument_addresses(void **collected_addresses, Arg &&arg,
                                              Args &&... args) {
  collected_addresses[0] = static_cast<void *>(&arg);
  collect_argument_addresses(collected_addresses + 1, std::forward<Args>(args)...);
}

template <typename... Args>
static inline void **collect_arguments(Args &&... args) {
  void **argument_ptrs = (void **) malloc((sizeof...(Args)) * sizeof(void *));
  collect_argument_addresses(argument_ptrs, std::forward<Args>(args)...);
  return argument_ptrs;
}

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void tryCUDA_WMMA_FULL_REDUCTION_CG(benchmark::State &state) {
  const size_t num_elements = state.range(0);
  if (num_elements % SEGMENT_SIZE) {
    state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE");
    return;
  }

  size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE;

  const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;

  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  dim3 gridDim, blockDim;
  blockDim.x = BLOCK_DIM;
  gridDim.x  = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;

  if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
    state.SkipWithError(
        fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
            .c_str());
    return;
  }

  PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half)));
  PRINT_IF_ERROR(hipMalloc(&d_out, gridDim.x * sizeof(half)));
  PRINT_IF_ERROR(hipMemset(d_out, 0, gridDim.x * sizeof(half)));

  cuda_memory_set(d_in_fp16, 0.001f, num_elements);

  hipEvent_t start, stop;
  PRINT_IF_ERROR(hipEventCreate(&start));
  PRINT_IF_ERROR(hipEventCreate(&stop));
  defer(hipEventDestroy(start));
  defer(hipEventDestroy(stop));

#if 0
  const auto params = collect_arguments(d_in_fp16, d_out, num_segments, SEGMENT_SIZE);
  defer(free(params));
#else
  void *params[] = {(void *) &d_in_fp16, (void *) &d_out, (void *) &num_segments};
#endif

  int maxActiveBlocks;
  hipOccupancyMaxActiveBlocksPerMultiprocessor(
      &maxActiveBlocks, (void *) &compute_wmma_reduction_cg<WARPS_PER_BLOCK, BLOCK_DIM>,
      blockDim.x, 0);

  // printf("gridDim = %d maxActiveBlocks = %d\n",gridDim.x,
  // maxActiveBlocks);

  try {
    for (auto _ : state) {
      PRINT_IF_ERROR(hipMemset(d_out, 0, gridDim.x * sizeof(half)));

      PRINT_IF_ERROR(hipEventRecord(start));

      hipLaunchCooperativeKernel(
          (const void *) &compute_wmma_reduction_cg<SEGMENT_SIZE, WARPS_PER_BLOCK,
                                                    BLOCK_DIM>,
          gridDim, blockDim, params);

      PRINT_IF_ERROR(hipEventRecord(stop));
      PRINT_IF_ERROR(hipEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);

      state.ResumeTiming();
    }

    state.counters.insert({{"num_elements", num_elements},
                           {"num_segments", num_segments},
                           {"segment_size", SEGMENT_SIZE},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_elements,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half h_out;
    PRINT_IF_ERROR(
        hipMemcpy(&h_out, d_out, 1 * sizeof(half), hipMemcpyDeviceToHost));

    int errors = 0;
    float correct_sum = 0;
    for (int i = 0; i < num_elements; i++) {
      correct_sum += h_in[i];
    }
    if (fabs(half_to_float(h_out) - correct_sum) > 0.001) {
      errors++;
      printf("Expected Reduction = %f, got h_out = %f\n", correct_sum,
             half_to_float(h_out));
    }
    if (errors > 0) {
      printf(
          "CUDA_WMMA_FULL_REDUCTION_CG does not agree with SEQUENTIAL! %d errors!\n",
          errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
#endif

    hipFree(d_in_fp16);
    hipFree(d_out);
  } catch (...) {
    hipFree(d_in_fp16);
    hipFree(d_out);
    hipDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

template <int SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_WMMA_FULL_REDUCTION_CG(benchmark::State &state) {
  hipDeviceReset();
  try {
    tryCUDA_WMMA_FULL_REDUCTION_CG<SEGMENT_SIZE, WARPS_PER_BLOCK>(state);
  } catch (const std::exception &e) {
    state.SkipWithError(e.what());
  } catch (const std::string &e) {
    state.SkipWithError(e.c_str());
  } catch (...) {
    state.SkipWithError("unknown exception");
  }
}

#define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK)                              \
  BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_CG, SEGMENT_SIZE, WARPS_PER_BLOCK)         \
      ->ARGS()                                                                           \
      ->UseManualTime()

#define BENCHMARK_REDUCTION(SEGMENT_SIZE)                                                \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16)

#if 0 // disabled
BENCHMARK_REDUCTION(256);
BENCHMARK_REDUCTION(2 * 256);
BENCHMARK_REDUCTION(4 * 256);
BENCHMARK_REDUCTION(8 * 256);
#if 0 // uses too much shared mem
BENCHMARK_REDUCTION(16 * 256);
BENCHMARK_REDUCTION(32 * 256);
BENCHMARK_REDUCTION(64 * 256);
BENCHMARK_REDUCTION(128 * 256);
BENCHMARK_REDUCTION(256 * 256);
BENCHMARK_REDUCTION(512 * 256);
BENCHMARK_REDUCTION(1024 * 256);
#endif
#endif
6c75fdbf7cc553816729228cb889369d4d164c0b.cu
#define CUB_HALF_OPTIMIZATION 1

#include <benchmark/benchmark.h>

#include <type_traits>
#include <utility>

#include <cooperative_groups.h>

#include "init/init.hpp"
#include "reduction/args.hpp"
#include "utils/utils.hpp"

#include "kernel.cuh"

using namespace wmma_reduction;

template <typename Fun>
struct is_function_ptr
    : std::integral_constant<
          bool, std::is_pointer<Fun>::value and
                    std::is_function<typename std::remove_pointer<Fun>::type>::value> {};

template <typename Arg, typename... Args>
static inline void collect_argument_addresses(void **collected_addresses, Arg &&arg,
                                              Args &&... args) {
  collected_addresses[0] = static_cast<void *>(&arg);
  collect_argument_addresses(collected_addresses + 1, std::forward<Args>(args)...);
}

template <typename... Args>
static inline void **collect_arguments(Args &&... args) {
  void **argument_ptrs = (void **) malloc((sizeof...(Args)) * sizeof(void *));
  collect_argument_addresses(argument_ptrs, std::forward<Args>(args)...);
  return argument_ptrs;
}

template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK>
void tryCUDA_WMMA_FULL_REDUCTION_CG(benchmark::State &state) {
  const size_t num_elements = state.range(0);
  if (num_elements % SEGMENT_SIZE) {
    state.SkipWithError("num_elements must be multiples of SEGMENT_SIZE");
    return;
  }

  size_t num_segments = (num_elements + SEGMENT_SIZE - 1) / SEGMENT_SIZE;

  const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE;

  half *d_in_fp16 = nullptr;
  half *d_out     = nullptr;

  dim3 gridDim, blockDim;
  blockDim.x = BLOCK_DIM;
  gridDim.x  = (num_segments + WARPS_PER_BLOCK - 1) / WARPS_PER_BLOCK;

  if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
    state.SkipWithError(
        fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x)
            .c_str());
    return;
  }

  PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half)));
  PRINT_IF_ERROR(cudaMalloc(&d_out, gridDim.x * sizeof(half)));
  PRINT_IF_ERROR(cudaMemset(d_out, 0, gridDim.x * sizeof(half)));

  cuda_memory_set(d_in_fp16, 0.001f, num_elements);

  cudaEvent_t start, stop;
  PRINT_IF_ERROR(cudaEventCreate(&start));
  PRINT_IF_ERROR(cudaEventCreate(&stop));
  defer(cudaEventDestroy(start));
  defer(cudaEventDestroy(stop));

#if 0
  const auto params = collect_arguments(d_in_fp16, d_out, num_segments, SEGMENT_SIZE);
  defer(free(params));
#else
  void *params[] = {(void *) &d_in_fp16, (void *) &d_out, (void *) &num_segments};
#endif

  int maxActiveBlocks;
  cudaOccupancyMaxActiveBlocksPerMultiprocessor(
      &maxActiveBlocks, (void *) &compute_wmma_reduction_cg<WARPS_PER_BLOCK, BLOCK_DIM>,
      blockDim.x, 0);

  // printf("gridDim = %d maxActiveBlocks = %d\n",gridDim.x,
  // maxActiveBlocks);

  try {
    for (auto _ : state) {
      PRINT_IF_ERROR(cudaMemset(d_out, 0, gridDim.x * sizeof(half)));

      PRINT_IF_ERROR(cudaEventRecord(start));

      cudaLaunchCooperativeKernel(
          (const void *) &compute_wmma_reduction_cg<SEGMENT_SIZE, WARPS_PER_BLOCK,
                                                    BLOCK_DIM>,
          gridDim, blockDim, params);

      PRINT_IF_ERROR(cudaEventRecord(stop));
      PRINT_IF_ERROR(cudaEventSynchronize(stop));

      state.PauseTiming();

      float msecTotal = 0.0f;
      PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop));
      state.SetIterationTime(msecTotal / 1000);

      state.ResumeTiming();
    }

    state.counters.insert({{"num_elements", num_elements},
                           {"num_segments", num_segments},
                           {"segment_size", SEGMENT_SIZE},
                           {"warps_per_block", WARPS_PER_BLOCK},
                           {"flops",
                            {state.iterations() * 1.0 * num_elements,
                             benchmark::Counter::kAvgThreadsRate}}});

#if 0
    half h_out;
    PRINT_IF_ERROR(
        cudaMemcpy(&h_out, d_out, 1 * sizeof(half), cudaMemcpyDeviceToHost));

    int errors = 0;
    float correct_sum = 0;
    for (int i = 0; i < num_elements; i++) {
      correct_sum += h_in[i];
    }
    if (fabs(half_to_float(h_out) - correct_sum) > 0.001) {
      errors++;
      printf("Expected Reduction = %f, got h_out = %f\n", correct_sum,
             half_to_float(h_out));
    }
    if (errors > 0) {
      printf(
          "CUDA_WMMA_FULL_REDUCTION_CG does not agree with SEQUENTIAL! %d errors!\n",
          errors);
    } else {
      printf("Results verified: they agree.\n\n");
    }
#endif

    cudaFree(d_in_fp16);
    cudaFree(d_out);
  } catch (...) {
    cudaFree(d_in_fp16);
    cudaFree(d_out);
    cudaDeviceReset();
    const auto p = std::current_exception();
    std::rethrow_exception(p);
  }
}

template <int SEGMENT_SIZE, int WARPS_PER_BLOCK>
void CUDA_WMMA_FULL_REDUCTION_CG(benchmark::State &state) {
  cudaDeviceReset();
  try {
    tryCUDA_WMMA_FULL_REDUCTION_CG<SEGMENT_SIZE, WARPS_PER_BLOCK>(state);
  } catch (const std::exception &e) {
    state.SkipWithError(e.what());
  } catch (const std::string &e) {
    state.SkipWithError(e.c_str());
  } catch (...) {
    state.SkipWithError("unknown exception");
  }
}

#define BENCHMARK_REDUCTION0(SEGMENT_SIZE, WARPS_PER_BLOCK)                              \
  BENCHMARK_TEMPLATE(CUDA_WMMA_FULL_REDUCTION_CG, SEGMENT_SIZE, WARPS_PER_BLOCK)         \
      ->ARGS()                                                                           \
      ->UseManualTime()

#define BENCHMARK_REDUCTION(SEGMENT_SIZE)                                                \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 1);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 2);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 4);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 8);                                                 \
  BENCHMARK_REDUCTION0(SEGMENT_SIZE, 16)

#if 0 // disabled
BENCHMARK_REDUCTION(256);
BENCHMARK_REDUCTION(2 * 256);
BENCHMARK_REDUCTION(4 * 256);
BENCHMARK_REDUCTION(8 * 256);
#if 0 // uses too much shared mem
BENCHMARK_REDUCTION(16 * 256);
BENCHMARK_REDUCTION(32 * 256);
BENCHMARK_REDUCTION(64 * 256);
BENCHMARK_REDUCTION(128 * 256);
BENCHMARK_REDUCTION(256 * 256);
BENCHMARK_REDUCTION(512 * 256);
BENCHMARK_REDUCTION(1024 * 256);
#endif
#endif
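Both versions of this benchmark launch compute_wmma_reduction_cg (defined in kernel.cuh / kernel_hip.cuh) through the cooperative-launch API, presumably because the kernel grid-syncs via cooperative groups; the kernel arguments therefore travel as an array of pointers, and the whole grid must be resident on the device at once. The minimal CUDA sketch below shows that launch pattern in isolation; the kernel grid_count and the grid/block sizes are hypothetical and chosen only for illustration.

// Minimal cooperative-launch sketch (assumes a device with the cooperativeLaunch
// attribute; grid-wide sync typically also requires building with -rdc=true).
#include <cooperative_groups.h>
#include <cstdio>
namespace cg = cooperative_groups;

// Hypothetical kernel: every thread bumps a counter, the whole grid syncs,
// then one thread publishes the final value.
__global__ void grid_count(int *counter, int *result) {
  atomicAdd(counter, 1);
  cg::this_grid().sync();                      // only valid under a cooperative launch
  if (blockIdx.x == 0 && threadIdx.x == 0) *result = *counter;
}

int main() {
  int *d_counter, *d_result;
  cudaMalloc(&d_counter, sizeof(int));
  cudaMalloc(&d_result, sizeof(int));
  cudaMemset(d_counter, 0, sizeof(int));

  dim3 grid(4), block(128);                    // must fit on the device simultaneously
  void *args[] = {(void *) &d_counter, (void *) &d_result};   // same pointer-array pattern as the benchmark

  cudaLaunchCooperativeKernel((const void *) &grid_count, grid, block, args, 0, 0);

  int result = 0;
  cudaMemcpy(&result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
  printf("counted %d threads\n", result);

  cudaFree(d_counter);
  cudaFree(d_result);
  return 0;
}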